target-i386/translate.c @ 09d85fb8
/*
2
 *  i386 translation
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdarg.h>
20
#include <stdlib.h>
21
#include <stdio.h>
22
#include <string.h>
23
#include <inttypes.h>
24
#include <signal.h>
25

    
26
#include "cpu.h"
27
#include "exec-all.h"
28
#include "disas.h"
29
#include "tcg-op.h"
30

    
31
#include "helper.h"
32
#define GEN_HELPER 1
33
#include "helper.h"
34

    
35
#define PREFIX_REPZ   0x01
36
#define PREFIX_REPNZ  0x02
37
#define PREFIX_LOCK   0x04
38
#define PREFIX_DATA   0x08
39
#define PREFIX_ADR    0x10
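/* The prefixes seen while decoding an instruction are accumulated as a
   bitmask in DisasContext.prefix; e.g. an insn preceded by 0xf3 (rep) and
   0x66 (operand size override) is decoded with
   prefix == (PREFIX_REPZ | PREFIX_DATA). */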
40

    
41
#ifdef TARGET_X86_64
42
#define X86_64_ONLY(x) x
43
#define X86_64_DEF(...)  __VA_ARGS__
44
#define CODE64(s) ((s)->code64)
45
#define REX_X(s) ((s)->rex_x)
46
#define REX_B(s) ((s)->rex_b)
47
/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
48
#if 1
49
#define BUGGY_64(x) NULL
50
#endif
51
#else
52
#define X86_64_ONLY(x) NULL
53
#define X86_64_DEF(...)
54
#define CODE64(s) 0
55
#define REX_X(s) 0
56
#define REX_B(s) 0
57
#endif
58

    
59
//#define MACRO_TEST   1
60

    
61
/* global register indexes */
62
static TCGv_ptr cpu_env;
63
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
64
static TCGv_i32 cpu_cc_op;
65
static TCGv cpu_regs[CPU_NB_REGS];
66
/* local temps */
67
static TCGv cpu_T[2], cpu_T3;
68
/* local register indexes (only used inside old micro ops) */
69
static TCGv cpu_tmp0, cpu_tmp4;
70
static TCGv_ptr cpu_ptr0, cpu_ptr1;
71
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
72
static TCGv_i64 cpu_tmp1_i64;
73
static TCGv cpu_tmp5;
74

    
75
#include "gen-icount.h"
76

    
77
#ifdef TARGET_X86_64
78
static int x86_64_hregs;
79
#endif
80

    
81
typedef struct DisasContext {
82
    /* current insn context */
83
    int override; /* -1 if no override */
84
    int prefix;
85
    int aflag, dflag;
86
    target_ulong pc; /* pc = eip + cs_base */
87
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
89
    /* current block context */
90
    target_ulong cs_base; /* base of CS segment */
91
    int pe;     /* protected mode */
92
    int code32; /* 32 bit code segment */
93
#ifdef TARGET_X86_64
94
    int lma;    /* long mode active */
95
    int code64; /* 64 bit code segment */
96
    int rex_x, rex_b;
97
#endif
98
    int ss32;   /* 32 bit stack segment */
99
    int cc_op;  /* current CC operation */
100
    int addseg; /* non zero if any of DS/ES/SS has a non zero base */
101
    int f_st;   /* currently unused */
102
    int vm86;   /* vm86 mode */
103
    int cpl;
104
    int iopl;
105
    int tf;     /* TF cpu flag */
106
    int singlestep_enabled; /* "hardware" single step enabled */
107
    int jmp_opt; /* use direct block chaining for direct jumps */
108
    int mem_index; /* select memory access functions */
109
    uint64_t flags; /* all execution flags */
110
    struct TranslationBlock *tb;
111
    int popl_esp_hack; /* for correct popl with esp base handling */
112
    int rip_offset; /* only used in x86_64, but left for simplicity */
113
    int cpuid_features;
114
    int cpuid_ext_features;
115
    int cpuid_ext2_features;
116
    int cpuid_ext3_features;
117
} DisasContext;
118

    
119
static void gen_eob(DisasContext *s);
120
static void gen_jmp(DisasContext *s, target_ulong eip);
121
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
122

    
123
/* i386 arith/logic operations */
124
enum {
125
    OP_ADDL,
126
    OP_ORL,
127
    OP_ADCL,
128
    OP_SBBL,
129
    OP_ANDL,
130
    OP_SUBL,
131
    OP_XORL,
132
    OP_CMPL,
133
};
134

    
135
/* i386 shift ops */
136
enum {
137
    OP_ROL,
138
    OP_ROR,
139
    OP_RCL,
140
    OP_RCR,
141
    OP_SHL,
142
    OP_SHR,
143
    OP_SHL1, /* undocumented */
144
    OP_SAR = 7,
145
};
146

    
147
enum {
148
    JCC_O,
149
    JCC_B,
150
    JCC_Z,
151
    JCC_BE,
152
    JCC_S,
153
    JCC_P,
154
    JCC_L,
155
    JCC_LE,
156
};
157

    
158
/* operand size */
159
enum {
160
    OT_BYTE = 0,
161
    OT_WORD,
162
    OT_LONG,
163
    OT_QUAD,
164
};
165

    
166
enum {
167
    /* I386 int registers */
168
    OR_EAX,   /* MUST be even numbered */
169
    OR_ECX,
170
    OR_EDX,
171
    OR_EBX,
172
    OR_ESP,
173
    OR_EBP,
174
    OR_ESI,
175
    OR_EDI,
176

    
177
    OR_TMP0 = 16,    /* temporary operand register */
178
    OR_TMP1,
179
    OR_A0, /* temporary register used when doing address evaluation */
180
};
181

    
182
static inline void gen_op_movl_T0_0(void)
183
{
184
    tcg_gen_movi_tl(cpu_T[0], 0);
185
}
186

    
187
static inline void gen_op_movl_T0_im(int32_t val)
188
{
189
    tcg_gen_movi_tl(cpu_T[0], val);
190
}
191

    
192
static inline void gen_op_movl_T0_imu(uint32_t val)
193
{
194
    tcg_gen_movi_tl(cpu_T[0], val);
195
}
196

    
197
static inline void gen_op_movl_T1_im(int32_t val)
198
{
199
    tcg_gen_movi_tl(cpu_T[1], val);
200
}
201

    
202
static inline void gen_op_movl_T1_imu(uint32_t val)
203
{
204
    tcg_gen_movi_tl(cpu_T[1], val);
205
}
206

    
207
static inline void gen_op_movl_A0_im(uint32_t val)
208
{
209
    tcg_gen_movi_tl(cpu_A0, val);
210
}
211

    
212
#ifdef TARGET_X86_64
213
static inline void gen_op_movq_A0_im(int64_t val)
214
{
215
    tcg_gen_movi_tl(cpu_A0, val);
216
}
217
#endif
218

    
219
static inline void gen_movtl_T0_im(target_ulong val)
220
{
221
    tcg_gen_movi_tl(cpu_T[0], val);
222
}
223

    
224
static inline void gen_movtl_T1_im(target_ulong val)
225
{
226
    tcg_gen_movi_tl(cpu_T[1], val);
227
}
228

    
229
static inline void gen_op_andl_T0_ffff(void)
230
{
231
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
232
}
233

    
234
static inline void gen_op_andl_T0_im(uint32_t val)
235
{
236
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
237
}
238

    
239
static inline void gen_op_movl_T0_T1(void)
240
{
241
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
242
}
243

    
244
static inline void gen_op_andl_A0_ffff(void)
245
{
246
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
247
}
248

    
249
#ifdef TARGET_X86_64
250

    
251
#define NB_OP_SIZES 4
252

    
253
#else /* !TARGET_X86_64 */
254

    
255
#define NB_OP_SIZES 3
256

    
257
#endif /* !TARGET_X86_64 */
258

    
259
#if defined(HOST_WORDS_BIGENDIAN)
260
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
261
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
262
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
263
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
264
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
265
#else
266
#define REG_B_OFFSET 0
267
#define REG_H_OFFSET 1
268
#define REG_W_OFFSET 0
269
#define REG_L_OFFSET 0
270
#define REG_LH_OFFSET 4
271
#endif
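/* These byte offsets locate the sub-parts of a register inside a
   target_ulong independently of host endianness: e.g. on a big endian
   host with an 8 byte target_ulong, AL lives at offset 7 and AH at
   offset 6, while on a little endian host they are at offsets 0 and 1. */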
272

    
273
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
274
{
275
    TCGv tmp;
276

    
277
    switch(ot) {
278
    case OT_BYTE:
279
        tmp = tcg_temp_new();
280
        tcg_gen_ext8u_tl(tmp, t0);
281
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
282
            tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xff);
283
            tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
284
        } else {
285
            tcg_gen_shli_tl(tmp, tmp, 8);
286
            tcg_gen_andi_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], ~0xff00);
287
            tcg_gen_or_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], tmp);
288
        }
289
        tcg_temp_free(tmp);
290
        break;
291
    case OT_WORD:
292
        tmp = tcg_temp_new();
293
        tcg_gen_ext16u_tl(tmp, t0);
294
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
295
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
296
        tcg_temp_free(tmp);
297
        break;
298
    default: /* XXX this shouldn't be reached;  abort? */
299
    case OT_LONG:
300
        /* For x86_64, this sets the higher half of register to zero.
301
           For i386, this is equivalent to a mov. */
302
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
303
        break;
304
#ifdef TARGET_X86_64
305
    case OT_QUAD:
306
        tcg_gen_mov_tl(cpu_regs[reg], t0);
307
        break;
308
#endif
309
    }
310
}
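/* For OT_BYTE above, reg < 4, reg >= 8, or any register when a REX
   prefix is in effect (x86_64_hregs) selects a uniform low byte register
   such as AL or SPL, so the low 8 bits of cpu_regs[reg] are replaced;
   otherwise reg 4..7 name the legacy high byte registers AH/CH/DH/BH,
   stored in bits 8..15 of cpu_regs[reg - 4]. */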
311

    
312
static inline void gen_op_mov_reg_T0(int ot, int reg)
313
{
314
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
315
}
316

    
317
static inline void gen_op_mov_reg_T1(int ot, int reg)
318
{
319
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
320
}
321

    
322
static inline void gen_op_mov_reg_A0(int size, int reg)
323
{
324
    TCGv tmp;
325

    
326
    switch(size) {
327
    case 0:
328
        tmp = tcg_temp_new();
329
        tcg_gen_ext16u_tl(tmp, cpu_A0);
330
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
331
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
332
        tcg_temp_free(tmp);
333
        break;
334
    default: /* XXX this shouldn't be reached;  abort? */
335
    case 1:
336
        /* For x86_64, this sets the higher half of register to zero.
337
           For i386, this is equivalent to a mov. */
338
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
339
        break;
340
#ifdef TARGET_X86_64
341
    case 2:
342
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
343
        break;
344
#endif
345
    }
346
}
347

    
348
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
349
{
350
    switch(ot) {
351
    case OT_BYTE:
352
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
353
            goto std_case;
354
        } else {
355
            tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
356
            tcg_gen_ext8u_tl(t0, t0);
357
        }
358
        break;
359
    default:
360
    std_case:
361
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
362
        break;
363
    }
364
}
365

    
366
static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
367
{
368
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
369
}
370

    
371
static inline void gen_op_movl_A0_reg(int reg)
372
{
373
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
374
}
375

    
376
static inline void gen_op_addl_A0_im(int32_t val)
377
{
378
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
379
#ifdef TARGET_X86_64
380
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
381
#endif
382
}
383

    
384
#ifdef TARGET_X86_64
385
static inline void gen_op_addq_A0_im(int64_t val)
386
{
387
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
388
}
389
#endif
390
    
391
static void gen_add_A0_im(DisasContext *s, int val)
392
{
393
#ifdef TARGET_X86_64
394
    if (CODE64(s))
395
        gen_op_addq_A0_im(val);
396
    else
397
#endif
398
        gen_op_addl_A0_im(val);
399
}
400

    
401
static inline void gen_op_addl_T0_T1(void)
402
{
403
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
404
}
405

    
406
static inline void gen_op_jmp_T0(void)
407
{
408
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
409
}
410

    
411
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
412
{
413
    switch(size) {
414
    case 0:
415
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
416
        tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
417
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
418
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
419
        break;
420
    case 1:
421
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
422
        /* For x86_64, this sets the higher half of register to zero.
423
           For i386, this is equivalent to a nop. */
424
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
425
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
426
        break;
427
#ifdef TARGET_X86_64
428
    case 2:
429
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
430
        break;
431
#endif
432
    }
433
}
434

    
435
static inline void gen_op_add_reg_T0(int size, int reg)
436
{
437
    switch(size) {
438
    case 0:
439
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
440
        tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
441
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
442
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
443
        break;
444
    case 1:
445
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
446
        /* For x86_64, this sets the higher half of register to zero.
447
           For i386, this is equivalent to a nop. */
448
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
449
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
450
        break;
451
#ifdef TARGET_X86_64
452
    case 2:
453
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
454
        break;
455
#endif
456
    }
457
}
458

    
459
static inline void gen_op_set_cc_op(int32_t val)
460
{
461
    tcg_gen_movi_i32(cpu_cc_op, val);
462
}
463

    
464
static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
465
{
466
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
467
    if (shift != 0)
468
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
469
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
470
    /* For x86_64, this sets the higher half of register to zero.
471
       For i386, this is equivalent to a nop. */
472
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
473
}
474

    
475
static inline void gen_op_movl_A0_seg(int reg)
476
{
477
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
478
}
479

    
480
static inline void gen_op_addl_A0_seg(int reg)
481
{
482
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
483
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
484
#ifdef TARGET_X86_64
485
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
486
#endif
487
}
488

    
489
#ifdef TARGET_X86_64
490
static inline void gen_op_movq_A0_seg(int reg)
491
{
492
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
493
}
494

    
495
static inline void gen_op_addq_A0_seg(int reg)
496
{
497
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
498
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
499
}
500

    
501
static inline void gen_op_movq_A0_reg(int reg)
502
{
503
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
504
}
505

    
506
static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
507
{
508
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
509
    if (shift != 0)
510
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
511
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
512
}
513
#endif
514

    
515
static inline void gen_op_lds_T0_A0(int idx)
516
{
517
    int mem_index = (idx >> 2) - 1;
518
    switch(idx & 3) {
519
    case 0:
520
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
521
        break;
522
    case 1:
523
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
524
        break;
525
    default:
526
    case 2:
527
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
528
        break;
529
    }
530
}
531

    
532
static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
533
{
534
    int mem_index = (idx >> 2) - 1;
535
    switch(idx & 3) {
536
    case 0:
537
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
538
        break;
539
    case 1:
540
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
541
        break;
542
    case 2:
543
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
544
        break;
545
    default:
546
    case 3:
547
        /* Should never happen on 32-bit targets.  */
548
#ifdef TARGET_X86_64
549
        tcg_gen_qemu_ld64(t0, a0, mem_index);
550
#endif
551
        break;
552
    }
553
}
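/* The 'idx' argument packs the operand size (OT_*) in its low two bits
   and the softmmu memory index, offset by one, in the bits above, which
   is why callers pass 'ot + s->mem_index'. */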
554

    
555
/* XXX: always use ldu or lds */
556
static inline void gen_op_ld_T0_A0(int idx)
557
{
558
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
559
}
560

    
561
static inline void gen_op_ldu_T0_A0(int idx)
562
{
563
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
564
}
565

    
566
static inline void gen_op_ld_T1_A0(int idx)
567
{
568
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
569
}
570

    
571
static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
572
{
573
    int mem_index = (idx >> 2) - 1;
574
    switch(idx & 3) {
575
    case 0:
576
        tcg_gen_qemu_st8(t0, a0, mem_index);
577
        break;
578
    case 1:
579
        tcg_gen_qemu_st16(t0, a0, mem_index);
580
        break;
581
    case 2:
582
        tcg_gen_qemu_st32(t0, a0, mem_index);
583
        break;
584
    default:
585
    case 3:
586
        /* Should never happen on 32-bit targets.  */
587
#ifdef TARGET_X86_64
588
        tcg_gen_qemu_st64(t0, a0, mem_index);
589
#endif
590
        break;
591
    }
592
}
593

    
594
static inline void gen_op_st_T0_A0(int idx)
595
{
596
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
597
}
598

    
599
static inline void gen_op_st_T1_A0(int idx)
600
{
601
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
602
}
603

    
604
static inline void gen_jmp_im(target_ulong pc)
605
{
606
    tcg_gen_movi_tl(cpu_tmp0, pc);
607
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
608
}
609

    
610
static inline void gen_string_movl_A0_ESI(DisasContext *s)
611
{
612
    int override;
613

    
614
    override = s->override;
615
#ifdef TARGET_X86_64
616
    if (s->aflag == 2) {
617
        if (override >= 0) {
618
            gen_op_movq_A0_seg(override);
619
            gen_op_addq_A0_reg_sN(0, R_ESI);
620
        } else {
621
            gen_op_movq_A0_reg(R_ESI);
622
        }
623
    } else
624
#endif
625
    if (s->aflag) {
626
        /* 32 bit address */
627
        if (s->addseg && override < 0)
628
            override = R_DS;
629
        if (override >= 0) {
630
            gen_op_movl_A0_seg(override);
631
            gen_op_addl_A0_reg_sN(0, R_ESI);
632
        } else {
633
            gen_op_movl_A0_reg(R_ESI);
634
        }
635
    } else {
636
        /* 16 bit address, always override */
637
        if (override < 0)
638
            override = R_DS;
639
        gen_op_movl_A0_reg(R_ESI);
640
        gen_op_andl_A0_ffff();
641
        gen_op_addl_A0_seg(override);
642
    }
643
}
644

    
645
static inline void gen_string_movl_A0_EDI(DisasContext *s)
646
{
647
#ifdef TARGET_X86_64
648
    if (s->aflag == 2) {
649
        gen_op_movq_A0_reg(R_EDI);
650
    } else
651
#endif
652
    if (s->aflag) {
653
        if (s->addseg) {
654
            gen_op_movl_A0_seg(R_ES);
655
            gen_op_addl_A0_reg_sN(0, R_EDI);
656
        } else {
657
            gen_op_movl_A0_reg(R_EDI);
658
        }
659
    } else {
660
        gen_op_movl_A0_reg(R_EDI);
661
        gen_op_andl_A0_ffff();
662
        gen_op_addl_A0_seg(R_ES);
663
    }
664
}
665

    
666
static inline void gen_op_movl_T0_Dshift(int ot) 
667
{
668
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
669
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
670
};
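/* env->df holds the direction flag as +1 or -1 rather than as the raw
   EFLAGS bit, so shifting it left by 'ot' yields the signed stride
   (+/-1, 2, 4 or 8 bytes) used to advance ESI/EDI in the string ops. */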
671

    
672
static void gen_extu(int ot, TCGv reg)
673
{
674
    switch(ot) {
675
    case OT_BYTE:
676
        tcg_gen_ext8u_tl(reg, reg);
677
        break;
678
    case OT_WORD:
679
        tcg_gen_ext16u_tl(reg, reg);
680
        break;
681
    case OT_LONG:
682
        tcg_gen_ext32u_tl(reg, reg);
683
        break;
684
    default:
685
        break;
686
    }
687
}
688

    
689
static void gen_exts(int ot, TCGv reg)
690
{
691
    switch(ot) {
692
    case OT_BYTE:
693
        tcg_gen_ext8s_tl(reg, reg);
694
        break;
695
    case OT_WORD:
696
        tcg_gen_ext16s_tl(reg, reg);
697
        break;
698
    case OT_LONG:
699
        tcg_gen_ext32s_tl(reg, reg);
700
        break;
701
    default:
702
        break;
703
    }
704
}
705

    
706
static inline void gen_op_jnz_ecx(int size, int label1)
707
{
708
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
709
    gen_extu(size + 1, cpu_tmp0);
710
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
711
}
712

    
713
static inline void gen_op_jz_ecx(int size, int label1)
714
{
715
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
716
    gen_extu(size + 1, cpu_tmp0);
717
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
718
}
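/* 'size' here is s->aflag (0 = 16 bit, 1 = 32 bit, 2 = 64 bit
   addressing), so 'size + 1' maps to OT_WORD/OT_LONG/OT_QUAD and the
   comparison effectively tests CX, ECX or RCX as appropriate. */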
719

    
720
static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
721
{
722
    switch (ot) {
723
    case 0: gen_helper_inb(v, n); break;
724
    case 1: gen_helper_inw(v, n); break;
725
    case 2: gen_helper_inl(v, n); break;
726
    }
727

    
728
}
729

    
730
static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
731
{
732
    switch (ot) {
733
    case 0: gen_helper_outb(v, n); break;
734
    case 1: gen_helper_outw(v, n); break;
735
    case 2: gen_helper_outl(v, n); break;
736
    }
737

    
738
}
739

    
740
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
741
                         uint32_t svm_flags)
742
{
743
    int state_saved;
744
    target_ulong next_eip;
745

    
746
    state_saved = 0;
747
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
748
        if (s->cc_op != CC_OP_DYNAMIC)
749
            gen_op_set_cc_op(s->cc_op);
750
        gen_jmp_im(cur_eip);
751
        state_saved = 1;
752
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
753
        switch (ot) {
754
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
755
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
756
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
757
        }
758
    }
759
    if(s->flags & HF_SVMI_MASK) {
760
        if (!state_saved) {
761
            if (s->cc_op != CC_OP_DYNAMIC)
762
                gen_op_set_cc_op(s->cc_op);
763
            gen_jmp_im(cur_eip);
764
            state_saved = 1;
765
        }
766
        svm_flags |= (1 << (4 + ot));
767
        next_eip = s->pc - s->cs_base;
768
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
769
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
770
                                tcg_const_i32(next_eip - cur_eip));
771
    }
772
}
773

    
774
static inline void gen_movs(DisasContext *s, int ot)
775
{
776
    gen_string_movl_A0_ESI(s);
777
    gen_op_ld_T0_A0(ot + s->mem_index);
778
    gen_string_movl_A0_EDI(s);
779
    gen_op_st_T0_A0(ot + s->mem_index);
780
    gen_op_movl_T0_Dshift(ot);
781
    gen_op_add_reg_T0(s->aflag, R_ESI);
782
    gen_op_add_reg_T0(s->aflag, R_EDI);
783
}
784

    
785
static inline void gen_update_cc_op(DisasContext *s)
786
{
787
    if (s->cc_op != CC_OP_DYNAMIC) {
788
        gen_op_set_cc_op(s->cc_op);
789
        s->cc_op = CC_OP_DYNAMIC;
790
    }
791
}
792

    
793
static void gen_op_update1_cc(void)
794
{
795
    tcg_gen_discard_tl(cpu_cc_src);
796
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
797
}
798

    
799
static void gen_op_update2_cc(void)
800
{
801
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
802
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
803
}
804

    
805
static inline void gen_op_cmpl_T0_T1_cc(void)
806
{
807
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
808
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
809
}
810

    
811
static inline void gen_op_testl_T0_T1_cc(void)
812
{
813
    tcg_gen_discard_tl(cpu_cc_src);
814
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
815
}
816

    
817
static void gen_op_update_neg_cc(void)
818
{
819
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
820
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
821
}
822

    
823
/* compute eflags.C to reg */
824
static void gen_compute_eflags_c(TCGv reg)
825
{
826
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
827
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
828
}
829

    
830
/* compute all eflags to reg */
831
static void gen_compute_eflags(TCGv reg)
832
{
833
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
834
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
835
}
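/* Flags are evaluated lazily: cpu_cc_op records which operation last set
   them and cpu_cc_dst/cpu_cc_src hold its result/operands, so most
   instructions never materialise EFLAGS.  The cc_compute_c and
   cc_compute_all helpers reconstruct CF or the full flag set only when
   they are actually needed. */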
836

    
837
static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
838
{
839
    if (s->cc_op != CC_OP_DYNAMIC)
840
        gen_op_set_cc_op(s->cc_op);
841
    switch(jcc_op) {
842
    case JCC_O:
843
        gen_compute_eflags(cpu_T[0]);
844
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
845
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
846
        break;
847
    case JCC_B:
848
        gen_compute_eflags_c(cpu_T[0]);
849
        break;
850
    case JCC_Z:
851
        gen_compute_eflags(cpu_T[0]);
852
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
853
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
854
        break;
855
    case JCC_BE:
856
        gen_compute_eflags(cpu_tmp0);
857
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
858
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
859
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
860
        break;
861
    case JCC_S:
862
        gen_compute_eflags(cpu_T[0]);
863
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
864
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
865
        break;
866
    case JCC_P:
867
        gen_compute_eflags(cpu_T[0]);
868
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
869
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
870
        break;
871
    case JCC_L:
872
        gen_compute_eflags(cpu_tmp0);
873
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
874
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
875
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
876
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
877
        break;
878
    default:
879
    case JCC_LE:
880
        gen_compute_eflags(cpu_tmp0);
881
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
882
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
883
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
884
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
885
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
886
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
887
        break;
888
    }
889
}
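/* The shift counts above correspond to the EFLAGS bit positions:
   OF is bit 11, SF bit 7, ZF bit 6, PF bit 2 and CF bit 0, which is why
   JCC_B can simply reuse gen_compute_eflags_c. */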
890

    
891
/* return true if setcc_slow is not needed (WARNING: must be kept in
892
   sync with gen_jcc1) */
893
static int is_fast_jcc_case(DisasContext *s, int b)
894
{
895
    int jcc_op;
896
    jcc_op = (b >> 1) & 7;
897
    switch(s->cc_op) {
898
        /* we optimize the cmp/jcc case */
899
    case CC_OP_SUBB:
900
    case CC_OP_SUBW:
901
    case CC_OP_SUBL:
902
    case CC_OP_SUBQ:
903
        if (jcc_op == JCC_O || jcc_op == JCC_P)
904
            goto slow_jcc;
905
        break;
906

    
907
        /* some jumps are easy to compute */
908
    case CC_OP_ADDB:
909
    case CC_OP_ADDW:
910
    case CC_OP_ADDL:
911
    case CC_OP_ADDQ:
912

    
913
    case CC_OP_LOGICB:
914
    case CC_OP_LOGICW:
915
    case CC_OP_LOGICL:
916
    case CC_OP_LOGICQ:
917

    
918
    case CC_OP_INCB:
919
    case CC_OP_INCW:
920
    case CC_OP_INCL:
921
    case CC_OP_INCQ:
922

    
923
    case CC_OP_DECB:
924
    case CC_OP_DECW:
925
    case CC_OP_DECL:
926
    case CC_OP_DECQ:
927

    
928
    case CC_OP_SHLB:
929
    case CC_OP_SHLW:
930
    case CC_OP_SHLL:
931
    case CC_OP_SHLQ:
932
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
933
            goto slow_jcc;
934
        break;
935
    default:
936
    slow_jcc:
937
        return 0;
938
    }
939
    return 1;
940
}
941

    
942
/* generate a conditional jump to label 'l1' according to jump opcode
943
   value 'b'. In the fast case, T0 is guaranted not to be used. */
944
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
945
{
946
    int inv, jcc_op, size, cond;
947
    TCGv t0;
948

    
949
    inv = b & 1;
950
    jcc_op = (b >> 1) & 7;
951

    
952
    switch(cc_op) {
953
        /* we optimize the cmp/jcc case */
954
    case CC_OP_SUBB:
955
    case CC_OP_SUBW:
956
    case CC_OP_SUBL:
957
    case CC_OP_SUBQ:
958
        
959
        size = cc_op - CC_OP_SUBB;
960
        switch(jcc_op) {
961
        case JCC_Z:
962
        fast_jcc_z:
963
            switch(size) {
964
            case 0:
965
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
966
                t0 = cpu_tmp0;
967
                break;
968
            case 1:
969
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
970
                t0 = cpu_tmp0;
971
                break;
972
#ifdef TARGET_X86_64
973
            case 2:
974
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
975
                t0 = cpu_tmp0;
976
                break;
977
#endif
978
            default:
979
                t0 = cpu_cc_dst;
980
                break;
981
            }
982
            tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
983
            break;
984
        case JCC_S:
985
        fast_jcc_s:
986
            switch(size) {
987
            case 0:
988
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
989
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0, 
990
                                   0, l1);
991
                break;
992
            case 1:
993
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
994
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0, 
995
                                   0, l1);
996
                break;
997
#ifdef TARGET_X86_64
998
            case 2:
999
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
1000
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0, 
1001
                                   0, l1);
1002
                break;
1003
#endif
1004
            default:
1005
                tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst, 
1006
                                   0, l1);
1007
                break;
1008
            }
1009
            break;
1010
            
1011
        case JCC_B:
1012
            cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
1013
            goto fast_jcc_b;
1014
        case JCC_BE:
1015
            cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
1016
        fast_jcc_b:
1017
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1018
            switch(size) {
1019
            case 0:
1020
                t0 = cpu_tmp0;
1021
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
1022
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
1023
                break;
1024
            case 1:
1025
                t0 = cpu_tmp0;
1026
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
1027
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
1028
                break;
1029
#ifdef TARGET_X86_64
1030
            case 2:
1031
                t0 = cpu_tmp0;
1032
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
1033
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
1034
                break;
1035
#endif
1036
            default:
1037
                t0 = cpu_cc_src;
1038
                break;
1039
            }
1040
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1041
            break;
1042
            
1043
        case JCC_L:
1044
            cond = inv ? TCG_COND_GE : TCG_COND_LT;
1045
            goto fast_jcc_l;
1046
        case JCC_LE:
1047
            cond = inv ? TCG_COND_GT : TCG_COND_LE;
1048
        fast_jcc_l:
1049
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1050
            switch(size) {
1051
            case 0:
1052
                t0 = cpu_tmp0;
1053
                tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
1054
                tcg_gen_ext8s_tl(t0, cpu_cc_src);
1055
                break;
1056
            case 1:
1057
                t0 = cpu_tmp0;
1058
                tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
1059
                tcg_gen_ext16s_tl(t0, cpu_cc_src);
1060
                break;
1061
#ifdef TARGET_X86_64
1062
            case 2:
1063
                t0 = cpu_tmp0;
1064
                tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
1065
                tcg_gen_ext32s_tl(t0, cpu_cc_src);
1066
                break;
1067
#endif
1068
            default:
1069
                t0 = cpu_cc_src;
1070
                break;
1071
            }
1072
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1073
            break;
1074
            
1075
        default:
1076
            goto slow_jcc;
1077
        }
1078
        break;
1079
        
1080
        /* some jumps are easy to compute */
1081
    case CC_OP_ADDB:
1082
    case CC_OP_ADDW:
1083
    case CC_OP_ADDL:
1084
    case CC_OP_ADDQ:
1085
        
1086
    case CC_OP_ADCB:
1087
    case CC_OP_ADCW:
1088
    case CC_OP_ADCL:
1089
    case CC_OP_ADCQ:
1090
        
1091
    case CC_OP_SBBB:
1092
    case CC_OP_SBBW:
1093
    case CC_OP_SBBL:
1094
    case CC_OP_SBBQ:
1095
        
1096
    case CC_OP_LOGICB:
1097
    case CC_OP_LOGICW:
1098
    case CC_OP_LOGICL:
1099
    case CC_OP_LOGICQ:
1100
        
1101
    case CC_OP_INCB:
1102
    case CC_OP_INCW:
1103
    case CC_OP_INCL:
1104
    case CC_OP_INCQ:
1105
        
1106
    case CC_OP_DECB:
1107
    case CC_OP_DECW:
1108
    case CC_OP_DECL:
1109
    case CC_OP_DECQ:
1110
        
1111
    case CC_OP_SHLB:
1112
    case CC_OP_SHLW:
1113
    case CC_OP_SHLL:
1114
    case CC_OP_SHLQ:
1115
        
1116
    case CC_OP_SARB:
1117
    case CC_OP_SARW:
1118
    case CC_OP_SARL:
1119
    case CC_OP_SARQ:
1120
        switch(jcc_op) {
1121
        case JCC_Z:
1122
            size = (cc_op - CC_OP_ADDB) & 3;
1123
            goto fast_jcc_z;
1124
        case JCC_S:
1125
            size = (cc_op - CC_OP_ADDB) & 3;
1126
            goto fast_jcc_s;
1127
        default:
1128
            goto slow_jcc;
1129
        }
1130
        break;
1131
    default:
1132
    slow_jcc:
1133
        gen_setcc_slow_T0(s, jcc_op);
1134
        tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, 
1135
                           cpu_T[0], 0, l1);
1136
        break;
1137
    }
1138
}
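/* 'b' is the low byte of the Jcc/SETcc/CMOVcc opcode: bit 0 inverts the
   condition and bits 3..1 select one of the JCC_* predicates, so e.g.
   JNE is encoded as (JCC_Z << 1) | 1. */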
1139

    
1140
/* XXX: does not work with gdbstub "ice" single step - not a
1141
   serious problem */
1142
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1143
{
1144
    int l1, l2;
1145

    
1146
    l1 = gen_new_label();
1147
    l2 = gen_new_label();
1148
    gen_op_jnz_ecx(s->aflag, l1);
1149
    gen_set_label(l2);
1150
    gen_jmp_tb(s, next_eip, 1);
1151
    gen_set_label(l1);
1152
    return l2;
1153
}
1154

    
1155
static inline void gen_stos(DisasContext *s, int ot)
1156
{
1157
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1158
    gen_string_movl_A0_EDI(s);
1159
    gen_op_st_T0_A0(ot + s->mem_index);
1160
    gen_op_movl_T0_Dshift(ot);
1161
    gen_op_add_reg_T0(s->aflag, R_EDI);
1162
}
1163

    
1164
static inline void gen_lods(DisasContext *s, int ot)
1165
{
1166
    gen_string_movl_A0_ESI(s);
1167
    gen_op_ld_T0_A0(ot + s->mem_index);
1168
    gen_op_mov_reg_T0(ot, R_EAX);
1169
    gen_op_movl_T0_Dshift(ot);
1170
    gen_op_add_reg_T0(s->aflag, R_ESI);
1171
}
1172

    
1173
static inline void gen_scas(DisasContext *s, int ot)
1174
{
1175
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1176
    gen_string_movl_A0_EDI(s);
1177
    gen_op_ld_T1_A0(ot + s->mem_index);
1178
    gen_op_cmpl_T0_T1_cc();
1179
    gen_op_movl_T0_Dshift(ot);
1180
    gen_op_add_reg_T0(s->aflag, R_EDI);
1181
}
1182

    
1183
static inline void gen_cmps(DisasContext *s, int ot)
1184
{
1185
    gen_string_movl_A0_ESI(s);
1186
    gen_op_ld_T0_A0(ot + s->mem_index);
1187
    gen_string_movl_A0_EDI(s);
1188
    gen_op_ld_T1_A0(ot + s->mem_index);
1189
    gen_op_cmpl_T0_T1_cc();
1190
    gen_op_movl_T0_Dshift(ot);
1191
    gen_op_add_reg_T0(s->aflag, R_ESI);
1192
    gen_op_add_reg_T0(s->aflag, R_EDI);
1193
}
1194

    
1195
static inline void gen_ins(DisasContext *s, int ot)
1196
{
1197
    if (use_icount)
1198
        gen_io_start();
1199
    gen_string_movl_A0_EDI(s);
1200
    /* Note: we must do this dummy write first to be restartable in
1201
       case of page fault. */
1202
    gen_op_movl_T0_0();
1203
    gen_op_st_T0_A0(ot + s->mem_index);
1204
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1205
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1206
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1207
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1208
    gen_op_st_T0_A0(ot + s->mem_index);
1209
    gen_op_movl_T0_Dshift(ot);
1210
    gen_op_add_reg_T0(s->aflag, R_EDI);
1211
    if (use_icount)
1212
        gen_io_end();
1213
}
1214

    
1215
static inline void gen_outs(DisasContext *s, int ot)
1216
{
1217
    if (use_icount)
1218
        gen_io_start();
1219
    gen_string_movl_A0_ESI(s);
1220
    gen_op_ld_T0_A0(ot + s->mem_index);
1221

    
1222
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1223
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1224
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1225
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1226
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1227

    
1228
    gen_op_movl_T0_Dshift(ot);
1229
    gen_op_add_reg_T0(s->aflag, R_ESI);
1230
    if (use_icount)
1231
        gen_io_end();
1232
}
1233

    
1234
/* same method as Valgrind: we generate jumps to the current or next
   instruction */
1236
#define GEN_REPZ(op)                                                          \
1237
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
1238
                                 target_ulong cur_eip, target_ulong next_eip) \
1239
{                                                                             \
1240
    int l2;\
1241
    gen_update_cc_op(s);                                                      \
1242
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
1243
    gen_ ## op(s, ot);                                                        \
1244
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
1245
    /* a loop would cause two single step exceptions if ECX = 1               \
       before the rep string insn */                                          \
1247
    if (!s->jmp_opt)                                                          \
1248
        gen_op_jz_ecx(s->aflag, l2);                                          \
1249
    gen_jmp(s, cur_eip);                                                      \
1250
}
1251

    
1252
#define GEN_REPZ2(op)                                                         \
1253
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
1254
                                   target_ulong cur_eip,                      \
1255
                                   target_ulong next_eip,                     \
1256
                                   int nz)                                    \
1257
{                                                                             \
1258
    int l2;\
1259
    gen_update_cc_op(s);                                                      \
1260
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
1261
    gen_ ## op(s, ot);                                                        \
1262
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
1263
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                        \
1264
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);                \
1265
    if (!s->jmp_opt)                                                          \
1266
        gen_op_jz_ecx(s->aflag, l2);                                          \
1267
    gen_jmp(s, cur_eip);                                                      \
1268
}
1269

    
1270
GEN_REPZ(movs)
1271
GEN_REPZ(stos)
1272
GEN_REPZ(lods)
1273
GEN_REPZ(ins)
1274
GEN_REPZ(outs)
1275
GEN_REPZ2(scas)
1276
GEN_REPZ2(cmps)
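/* Rather than emitting a real loop, a REP-prefixed string insn is
   translated as a single iteration that decrements ECX and then jumps
   back to cur_eip, so the same code is re-entered until ECX (or the ZF
   test for REPZ/REPNZ scas/cmps) terminates it.  This keeps each
   iteration restartable after a page fault and interruptible. */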
1277

    
1278
static void gen_helper_fp_arith_ST0_FT0(int op)
1279
{
1280
    switch (op) {
1281
    case 0: gen_helper_fadd_ST0_FT0(); break;
1282
    case 1: gen_helper_fmul_ST0_FT0(); break;
1283
    case 2: gen_helper_fcom_ST0_FT0(); break;
1284
    case 3: gen_helper_fcom_ST0_FT0(); break;
1285
    case 4: gen_helper_fsub_ST0_FT0(); break;
1286
    case 5: gen_helper_fsubr_ST0_FT0(); break;
1287
    case 6: gen_helper_fdiv_ST0_FT0(); break;
1288
    case 7: gen_helper_fdivr_ST0_FT0(); break;
1289
    }
1290
}
1291

    
1292
/* NOTE the exception in "r" op ordering: for the STN,ST0 forms the
   fsub/fsubr and fdiv/fdivr helpers are swapped relative to the table
   above */
1293
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1294
{
1295
    TCGv_i32 tmp = tcg_const_i32(opreg);
1296
    switch (op) {
1297
    case 0: gen_helper_fadd_STN_ST0(tmp); break;
1298
    case 1: gen_helper_fmul_STN_ST0(tmp); break;
1299
    case 4: gen_helper_fsubr_STN_ST0(tmp); break;
1300
    case 5: gen_helper_fsub_STN_ST0(tmp); break;
1301
    case 6: gen_helper_fdivr_STN_ST0(tmp); break;
1302
    case 7: gen_helper_fdiv_STN_ST0(tmp); break;
1303
    }
1304
}
1305

    
1306
/* if d == OR_TMP0, it means memory operand (address in A0) */
1307
static void gen_op(DisasContext *s1, int op, int ot, int d)
1308
{
1309
    if (d != OR_TMP0) {
1310
        gen_op_mov_TN_reg(ot, 0, d);
1311
    } else {
1312
        gen_op_ld_T0_A0(ot + s1->mem_index);
1313
    }
1314
    switch(op) {
1315
    case OP_ADCL:
1316
        if (s1->cc_op != CC_OP_DYNAMIC)
1317
            gen_op_set_cc_op(s1->cc_op);
1318
        gen_compute_eflags_c(cpu_tmp4);
1319
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1320
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1321
        if (d != OR_TMP0)
1322
            gen_op_mov_reg_T0(ot, d);
1323
        else
1324
            gen_op_st_T0_A0(ot + s1->mem_index);
1325
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1326
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1327
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1328
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1329
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1330
        s1->cc_op = CC_OP_DYNAMIC;
1331
        break;
1332
    case OP_SBBL:
1333
        if (s1->cc_op != CC_OP_DYNAMIC)
1334
            gen_op_set_cc_op(s1->cc_op);
1335
        gen_compute_eflags_c(cpu_tmp4);
1336
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1337
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1338
        if (d != OR_TMP0)
1339
            gen_op_mov_reg_T0(ot, d);
1340
        else
1341
            gen_op_st_T0_A0(ot + s1->mem_index);
1342
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1343
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1344
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1345
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1346
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1347
        s1->cc_op = CC_OP_DYNAMIC;
1348
        break;
1349
    case OP_ADDL:
1350
        gen_op_addl_T0_T1();
1351
        if (d != OR_TMP0)
1352
            gen_op_mov_reg_T0(ot, d);
1353
        else
1354
            gen_op_st_T0_A0(ot + s1->mem_index);
1355
        gen_op_update2_cc();
1356
        s1->cc_op = CC_OP_ADDB + ot;
1357
        break;
1358
    case OP_SUBL:
1359
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1360
        if (d != OR_TMP0)
1361
            gen_op_mov_reg_T0(ot, d);
1362
        else
1363
            gen_op_st_T0_A0(ot + s1->mem_index);
1364
        gen_op_update2_cc();
1365
        s1->cc_op = CC_OP_SUBB + ot;
1366
        break;
1367
    default:
1368
    case OP_ANDL:
1369
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1370
        if (d != OR_TMP0)
1371
            gen_op_mov_reg_T0(ot, d);
1372
        else
1373
            gen_op_st_T0_A0(ot + s1->mem_index);
1374
        gen_op_update1_cc();
1375
        s1->cc_op = CC_OP_LOGICB + ot;
1376
        break;
1377
    case OP_ORL:
1378
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1379
        if (d != OR_TMP0)
1380
            gen_op_mov_reg_T0(ot, d);
1381
        else
1382
            gen_op_st_T0_A0(ot + s1->mem_index);
1383
        gen_op_update1_cc();
1384
        s1->cc_op = CC_OP_LOGICB + ot;
1385
        break;
1386
    case OP_XORL:
1387
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1388
        if (d != OR_TMP0)
1389
            gen_op_mov_reg_T0(ot, d);
1390
        else
1391
            gen_op_st_T0_A0(ot + s1->mem_index);
1392
        gen_op_update1_cc();
1393
        s1->cc_op = CC_OP_LOGICB + ot;
1394
        break;
1395
    case OP_CMPL:
1396
        gen_op_cmpl_T0_T1_cc();
1397
        s1->cc_op = CC_OP_SUBB + ot;
1398
        break;
1399
    }
1400
}
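/* For OP_ADCL/OP_SBBL the flags cannot be described by a single static
   cc_op, so the computed carry (0 or 1) is shifted left by 2 and added
   to CC_OP_ADDB + ot (resp. CC_OP_SUBB + ot); assuming the usual CC_OP
   enum layout, where the ADC/SBB entries follow the ADD/SUB ones four
   places later, this selects the right variant at run time. */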
1401

    
1402
/* if d == OR_TMP0, it means memory operand (address in A0) */
1403
static void gen_inc(DisasContext *s1, int ot, int d, int c)
1404
{
1405
    if (d != OR_TMP0)
1406
        gen_op_mov_TN_reg(ot, 0, d);
1407
    else
1408
        gen_op_ld_T0_A0(ot + s1->mem_index);
1409
    if (s1->cc_op != CC_OP_DYNAMIC)
1410
        gen_op_set_cc_op(s1->cc_op);
1411
    if (c > 0) {
1412
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1413
        s1->cc_op = CC_OP_INCB + ot;
1414
    } else {
1415
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1416
        s1->cc_op = CC_OP_DECB + ot;
1417
    }
1418
    if (d != OR_TMP0)
1419
        gen_op_mov_reg_T0(ot, d);
1420
    else
1421
        gen_op_st_T0_A0(ot + s1->mem_index);
1422
    gen_compute_eflags_c(cpu_cc_src);
1423
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1424
}
1425

    
1426
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1, 
1427
                            int is_right, int is_arith)
1428
{
1429
    target_ulong mask;
1430
    int shift_label;
1431
    TCGv t0, t1;
1432

    
1433
    if (ot == OT_QUAD)
1434
        mask = 0x3f;
1435
    else
1436
        mask = 0x1f;
1437

    
1438
    /* load */
1439
    if (op1 == OR_TMP0)
1440
        gen_op_ld_T0_A0(ot + s->mem_index);
1441
    else
1442
        gen_op_mov_TN_reg(ot, 0, op1);
1443

    
1444
    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1445

    
1446
    tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);
1447

    
1448
    if (is_right) {
1449
        if (is_arith) {
1450
            gen_exts(ot, cpu_T[0]);
1451
            tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1452
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1453
        } else {
1454
            gen_extu(ot, cpu_T[0]);
1455
            tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1456
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1457
        }
1458
    } else {
1459
        tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1460
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1461
    }
1462

    
1463
    /* store */
1464
    if (op1 == OR_TMP0)
1465
        gen_op_st_T0_A0(ot + s->mem_index);
1466
    else
1467
        gen_op_mov_reg_T0(ot, op1);
1468
        
1469
    /* update eflags if non zero shift */
1470
    if (s->cc_op != CC_OP_DYNAMIC)
1471
        gen_op_set_cc_op(s->cc_op);
1472

    
1473
    /* XXX: inefficient */
1474
    t0 = tcg_temp_local_new();
1475
    t1 = tcg_temp_local_new();
1476

    
1477
    tcg_gen_mov_tl(t0, cpu_T[0]);
1478
    tcg_gen_mov_tl(t1, cpu_T3);
1479

    
1480
    shift_label = gen_new_label();
1481
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);
1482

    
1483
    tcg_gen_mov_tl(cpu_cc_src, t1);
1484
    tcg_gen_mov_tl(cpu_cc_dst, t0);
1485
    if (is_right)
1486
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1487
    else
1488
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1489
        
1490
    gen_set_label(shift_label);
1491
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1492

    
1493
    tcg_temp_free(t0);
1494
    tcg_temp_free(t1);
1495
}
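/* A shift by zero (after masking the count) must leave EFLAGS untouched,
   which is why the flag update above is guarded by a run-time branch on
   the count and cc_op has to be left as CC_OP_DYNAMIC. */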
1496

    
1497
static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1498
                            int is_right, int is_arith)
1499
{
1500
    int mask;
1501
    
1502
    if (ot == OT_QUAD)
1503
        mask = 0x3f;
1504
    else
1505
        mask = 0x1f;
1506

    
1507
    /* load */
1508
    if (op1 == OR_TMP0)
1509
        gen_op_ld_T0_A0(ot + s->mem_index);
1510
    else
1511
        gen_op_mov_TN_reg(ot, 0, op1);
1512

    
1513
    op2 &= mask;
1514
    if (op2 != 0) {
1515
        if (is_right) {
1516
            if (is_arith) {
1517
                gen_exts(ot, cpu_T[0]);
1518
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1519
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1520
            } else {
1521
                gen_extu(ot, cpu_T[0]);
1522
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1523
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1524
            }
1525
        } else {
1526
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1527
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1528
        }
1529
    }
1530

    
1531
    /* store */
1532
    if (op1 == OR_TMP0)
1533
        gen_op_st_T0_A0(ot + s->mem_index);
1534
    else
1535
        gen_op_mov_reg_T0(ot, op1);
1536
        
1537
    /* update eflags if non zero shift */
1538
    if (op2 != 0) {
1539
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1540
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1541
        if (is_right)
1542
            s->cc_op = CC_OP_SARB + ot;
1543
        else
1544
            s->cc_op = CC_OP_SHLB + ot;
1545
    }
1546
}
1547

    
1548
static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1549
{
1550
    if (arg2 >= 0)
1551
        tcg_gen_shli_tl(ret, arg1, arg2);
1552
    else
1553
        tcg_gen_shri_tl(ret, arg1, -arg2);
1554
}
1555

    
1556
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, 
1557
                          int is_right)
1558
{
1559
    target_ulong mask;
1560
    int label1, label2, data_bits;
1561
    TCGv t0, t1, t2, a0;
1562

    
1563
    /* XXX: inefficient, but we must use local temps */
1564
    t0 = tcg_temp_local_new();
1565
    t1 = tcg_temp_local_new();
1566
    t2 = tcg_temp_local_new();
1567
    a0 = tcg_temp_local_new();
1568

    
1569
    if (ot == OT_QUAD)
1570
        mask = 0x3f;
1571
    else
1572
        mask = 0x1f;
1573

    
1574
    /* load */
1575
    if (op1 == OR_TMP0) {
1576
        tcg_gen_mov_tl(a0, cpu_A0);
1577
        gen_op_ld_v(ot + s->mem_index, t0, a0);
1578
    } else {
1579
        gen_op_mov_v_reg(ot, t0, op1);
1580
    }
1581

    
1582
    tcg_gen_mov_tl(t1, cpu_T[1]);
1583

    
1584
    tcg_gen_andi_tl(t1, t1, mask);
1585

    
1586
    /* Must test zero case to avoid using undefined behaviour in TCG
1587
       shifts. */
1588
    label1 = gen_new_label();
1589
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1590
    
1591
    if (ot <= OT_WORD)
1592
        tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1593
    else
1594
        tcg_gen_mov_tl(cpu_tmp0, t1);
1595
    
1596
    gen_extu(ot, t0);
1597
    tcg_gen_mov_tl(t2, t0);
1598

    
1599
    data_bits = 8 << ot;
1600
    /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1601
       fix TCG definition) */
1602
    if (is_right) {
1603
        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1604
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1605
        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1606
    } else {
1607
        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1608
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1609
        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1610
    }
1611
    tcg_gen_or_tl(t0, t0, cpu_tmp4);
1612

    
1613
    gen_set_label(label1);
1614
    /* store */
1615
    if (op1 == OR_TMP0) {
1616
        gen_op_st_v(ot + s->mem_index, t0, a0);
1617
    } else {
1618
        gen_op_mov_reg_v(ot, op1, t0);
1619
    }
1620
    
1621
    /* update eflags */
1622
    if (s->cc_op != CC_OP_DYNAMIC)
1623
        gen_op_set_cc_op(s->cc_op);
1624

    
1625
    label2 = gen_new_label();
1626
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1627

    
1628
    gen_compute_eflags(cpu_cc_src);
1629
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1630
    tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1631
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1632
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1633
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1634
    if (is_right) {
1635
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
1636
    }
1637
    tcg_gen_andi_tl(t0, t0, CC_C);
1638
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1639
    
1640
    tcg_gen_discard_tl(cpu_cc_dst);
1641
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1642
        
1643
    gen_set_label(label2);
1644
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1645

    
1646
    tcg_temp_free(t0);
1647
    tcg_temp_free(t1);
1648
    tcg_temp_free(t2);
1649
    tcg_temp_free(a0);
1650
}
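/* Rotates only update CF and OF: CF is taken from the bit rotated around
   the end (the MSB of the result for right rotates, the LSB otherwise),
   and OF is derived from the XOR of the old and new sign bits, shifted so
   that it lands on bit 11 (CC_O) of cc_src. */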
1651

    
1652
static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1653
                          int is_right)
1654
{
1655
    int mask;
1656
    int data_bits;
1657
    TCGv t0, t1, a0;
1658

    
1659
    /* XXX: inefficient, but we must use local temps */
1660
    t0 = tcg_temp_local_new();
1661
    t1 = tcg_temp_local_new();
1662
    a0 = tcg_temp_local_new();
1663

    
1664
    if (ot == OT_QUAD)
1665
        mask = 0x3f;
1666
    else
1667
        mask = 0x1f;
1668

    
1669
    /* load */
1670
    if (op1 == OR_TMP0) {
1671
        tcg_gen_mov_tl(a0, cpu_A0);
1672
        gen_op_ld_v(ot + s->mem_index, t0, a0);
1673
    } else {
1674
        gen_op_mov_v_reg(ot, t0, op1);
1675
    }
1676

    
1677
    gen_extu(ot, t0);
1678
    tcg_gen_mov_tl(t1, t0);
1679

    
1680
    op2 &= mask;
1681
    data_bits = 8 << ot;
1682
    if (op2 != 0) {
1683
        int shift = op2 & ((1 << (3 + ot)) - 1);
1684
        if (is_right) {
1685
            tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1686
            tcg_gen_shli_tl(t0, t0, data_bits - shift);
1687
        }
1688
        else {
1689
            tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1690
            tcg_gen_shri_tl(t0, t0, data_bits - shift);
1691
        }
1692
        tcg_gen_or_tl(t0, t0, cpu_tmp4);
1693
    }
1694

    
1695
    /* store */
1696
    if (op1 == OR_TMP0) {
1697
        gen_op_st_v(ot + s->mem_index, t0, a0);
1698
    } else {
1699
        gen_op_mov_reg_v(ot, op1, t0);
1700
    }
1701

    
1702
    if (op2 != 0) {
1703
        /* update eflags */
1704
        if (s->cc_op != CC_OP_DYNAMIC)
1705
            gen_op_set_cc_op(s->cc_op);
1706

    
1707
        gen_compute_eflags(cpu_cc_src);
1708
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1709
        tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1710
        tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1711
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1712
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1713
        if (is_right) {
1714
            tcg_gen_shri_tl(t0, t0, data_bits - 1);
1715
        }
1716
        tcg_gen_andi_tl(t0, t0, CC_C);
1717
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1718

    
1719
        tcg_gen_discard_tl(cpu_cc_dst);
1720
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1721
        s->cc_op = CC_OP_EFLAGS;
1722
    }
1723

    
1724
    tcg_temp_free(t0);
1725
    tcg_temp_free(t1);
1726
    tcg_temp_free(a0);
1727
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    int label1;

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch (ot) {
        case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    } else {
        switch (ot) {
        case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    }
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);

    tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label1);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
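
/* SHLD/SHRD: double-width shift of T0, with T1 supplying the bits
   shifted in and the count taken from T3 (masked to 5 or 6 bits). */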
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
                                int is_right)
{
    int label1, label2, data_bits;
    target_ulong mask;
    TCGv t0, t1, t2, a0;

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_mov_tl(t2, cpu_T3);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);

    tcg_gen_addi_tl(cpu_tmp5, t2, -1);
    if (ot == OT_WORD) {
        /* Note: we implement the Intel behaviour for shift count > 16 */
        if (is_right) {
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(cpu_tmp0, t1, 16);
            tcg_gen_or_tl(t0, t0, cpu_tmp0);
            tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            /* only needed if count > 16, but a test would complicate */
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);

            tcg_gen_or_tl(t0, t0, cpu_tmp0);
        } else {
            /* XXX: not optimal */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(t1, t1, 16);
            tcg_gen_or_tl(t1, t1, t0);
            tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
            tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
            tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    } else {
        data_bits = 8 << ot;
        if (is_right) {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);

        } else {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    }
    tcg_gen_mov_tl(t1, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }
    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}
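
/* dispatch a shift/rotate with a variable count: the count is loaded
   into T1 from register 's' unless it is already there (s == OR_TMP1) */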
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}
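
/* same dispatch for an immediate count; RCL/RCR fall back to the
   variable-count path through T1 */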
static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
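
/* decode a modrm memory operand: compute the effective address into A0,
   consuming any SIB byte and displacement, and add the segment base when
   an override is present or addseg requires it */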
static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int opreg;
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    if (s->aflag) {

        havesib = 0;
        base = rm;
        index = 0;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = ldub_code(s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)ldl_code(s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = ldl_code(s->pc);
            s->pc += 4;
            break;
        }

        if (base >= 0) {
            /* for correct popl handling with esp */
            if (base == 4 && s->popl_esp_hack)
                disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(base);
                if (disp != 0) {
                    gen_op_addq_A0_im(disp);
                }
            } else
#endif
            {
                gen_op_movl_A0_reg(base);
                if (disp != 0)
                    gen_op_addl_A0_im(disp);
            }
        } else {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_im(disp);
            } else
#endif
            {
                gen_op_movl_A0_im(disp);
            }
        }
        /* XXX: index == 4 is always invalid */
        if (havesib && (index != 4 || scale != 0)) {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_reg_sN(scale, index);
            } else
#endif
            {
                gen_op_addl_A0_reg_sN(scale, index);
            }
        }
        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP)
                    override = R_SS;
                else
                    override = R_DS;
            }
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_seg(override);
            } else
#endif
            {
                gen_op_addl_A0_seg(override);
            }
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = lduw_code(s->pc);
                s->pc += 2;
                gen_op_movl_A0_im(disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = lduw_code(s->pc);
            s->pc += 2;
            break;
        }
        switch(rm) {
        case 0:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 1:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 2:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 3:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 4:
            gen_op_movl_A0_reg(R_ESI);
            break;
        case 5:
            gen_op_movl_A0_reg(R_EDI);
            break;
        case 6:
            gen_op_movl_A0_reg(R_EBP);
            break;
        default:
        case 7:
            gen_op_movl_A0_reg(R_EBX);
            break;
        }
        if (disp != 0)
            gen_op_addl_A0_im(disp);
        gen_op_andl_A0_ffff();
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6)
                    override = R_SS;
                else
                    override = R_DS;
            }
            gen_op_addl_A0_seg(override);
        }
    }

    opreg = OR_A0;
    disp = 0;
    *reg_ptr = opreg;
    *offset_ptr = disp;
}
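
/* skip the addressing bytes of a modrm memory operand without
   generating any code (used for NOP-like encodings) */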
static void gen_nop_modrm(DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    if (s->aflag) {

        base = rm;

        if (base == 4) {
            code = ldub_code(s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
    }
}

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(override);
        }
    }
}

/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    } else {
        gen_lea_modrm(s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
        } else {
            gen_op_ld_T0_A0(ot + s->mem_index);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    }
}

    
2257
static inline uint32_t insn_get(DisasContext *s, int ot)
2258
{
2259
    uint32_t ret;
2260

    
2261
    switch(ot) {
2262
    case OT_BYTE:
2263
        ret = ldub_code(s->pc);
2264
        s->pc++;
2265
        break;
2266
    case OT_WORD:
2267
        ret = lduw_code(s->pc);
2268
        s->pc += 2;
2269
        break;
2270
    default:
2271
    case OT_LONG:
2272
        ret = ldl_code(s->pc);
2273
        s->pc += 4;
2274
        break;
2275
    }
2276
    return ret;
2277
}
2278

    
2279
static inline int insn_const_size(unsigned int ot)
2280
{
2281
    if (ot <= OT_LONG)
2282
        return 1 << ot;
2283
    else
2284
        return 4;
2285
}
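
/* jump to 'eip': chain directly to the target TB when it lies in one of
   the pages this TB already covers, otherwise end the block */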
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
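
/* conditional jump: with TB chaining enabled both outcomes are chained,
   otherwise the chosen eip is stored and the block is ended */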
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2, cc_op;

    cc_op = s->cc_op;
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = 3;
    } else {

        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}

static void gen_setcc(DisasContext *s, int b)
{
    int inv, jcc_op, l1;
    TCGv t0;

    if (is_fast_jcc_case(s, b)) {
        /* nominal case: we use a jump */
        /* XXX: make it faster by adding new instructions in TCG */
        t0 = tcg_temp_local_new();
        tcg_gen_movi_tl(t0, 0);
        l1 = gen_new_label();
        gen_jcc1(s, s->cc_op, b ^ 1, l1);
        tcg_gen_movi_tl(t0, 1);
        gen_set_label(l1);
        tcg_gen_mov_tl(cpu_T[0], t0);
        tcg_temp_free(t0);
    } else {
        /* slow case: it is more efficient not to generate a jump,
           although it is questionable whether this optimization is
           worthwhile */
        inv = b & 1;
        jcc_op = (b >> 1) & 7;
        gen_setcc_slow_T0(s, jcc_op);
        if (inv) {
            tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
        }
    }
}

static inline void gen_op_movl_T0_seg(int seg_reg)
{
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State,segs[seg_reg].base));
}

/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
{
    if (s->pe && !s->vm86) {
        /* XXX: optimize by finding processor state dynamically */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop, as special handling is needed to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = 3;
    } else {
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = 3;
    }
}

static inline int svm_is_rep(int prefixes)
{
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
}

static inline void
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
{
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
        return;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(tcg_const_i32(type),
                                         tcg_const_i64(param));
}

static inline void
gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
{
    gen_svm_check_intercept_param(s, pc_start, type, 0);
}
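
/* adjust ESP by 'addend', with the address width implied by CODE64/ss32 */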
static inline void gen_stack_update(DisasContext *s, int addend)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_add_reg_im(2, R_ESP, addend);
    } else
#endif
    if (s->ss32) {
        gen_op_add_reg_im(1, R_ESP, addend);
    } else {
        gen_op_add_reg_im(0, R_ESP, addend);
    }
}

/* generate a push. It depends on ss32, addseg and dflag */
static void gen_push_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        if (s->dflag) {
            gen_op_addq_A0_im(-8);
            gen_op_st_T0_A0(OT_QUAD + s->mem_index);
        } else {
            gen_op_addq_A0_im(-2);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        }
        gen_op_mov_reg_A0(2, R_ESP);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (!s->dflag)
            gen_op_addl_A0_im(-2);
        else
            gen_op_addl_A0_im(-4);
        if (s->ss32) {
            if (s->addseg) {
                tcg_gen_mov_tl(cpu_T[1], cpu_A0);
                gen_op_addl_A0_seg(R_SS);
            }
        } else {
            gen_op_andl_A0_ffff();
            tcg_gen_mov_tl(cpu_T[1], cpu_A0);
            gen_op_addl_A0_seg(R_SS);
        }
        gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
        else
            gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
    }
}

/* generate a push. It depends on ss32, addseg and dflag */
/* slower version for T1, only used for call Ev */
static void gen_push_T1(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        if (s->dflag) {
            gen_op_addq_A0_im(-8);
            gen_op_st_T1_A0(OT_QUAD + s->mem_index);
        } else {
            gen_op_addq_A0_im(-2);
            gen_op_st_T1_A0(OT_WORD + s->mem_index);
        }
        gen_op_mov_reg_A0(2, R_ESP);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (!s->dflag)
            gen_op_addl_A0_im(-2);
        else
            gen_op_addl_A0_im(-4);
        if (s->ss32) {
            if (s->addseg) {
                gen_op_addl_A0_seg(R_SS);
            }
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(R_SS);
        }
        gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);

        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
        else
            gen_stack_update(s, (-2) << s->dflag);
    }
}

/* two step pop is necessary for precise exceptions */
static void gen_pop_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (s->ss32) {
            if (s->addseg)
                gen_op_addl_A0_seg(R_SS);
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(R_SS);
        }
        gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
    }
}

static void gen_pop_update(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s) && s->dflag) {
        gen_stack_update(s, 8);
    } else
#endif
    {
        gen_stack_update(s, 2 << s->dflag);
    }
}
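
/* compute the stack address in A0, keeping a pre-segment copy in T1 for
   the later ESP write-back */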
static void gen_stack_A0(DisasContext *s)
{
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(R_SS);
}

/* NOTE: wrap around in 16 bit not fully handled */
static void gen_pusha(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-16 << s->dflag);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(R_SS);
    for(i = 0; i < 8; i++) {
        gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
        gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
}

/* NOTE: wrap around in 16 bit not fully handled */
static void gen_popa(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
    if (s->addseg)
        gen_op_addl_A0_seg(R_SS);
    for(i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (i != 3) {
            gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
            gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
        }
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
}
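
/* ENTER: push EBP as the new frame pointer; for level > 0 a helper
   copies the nested frame pointers, then ESP is lowered by esp_addend
   plus the space those copies used */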
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    int ot, opsize;

    level &= 0x1f;
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        ot = s->dflag ? OT_QUAD : OT_WORD;
        opsize = 1 << ot;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addq_A0_im(-opsize);
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);

        /* push bp */
        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter64_level(tcg_const_i32(level),
                                     tcg_const_i32((ot == OT_QUAD)),
                                     cpu_T[1]);
        }
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_QUAD, R_ESP);
    } else
#endif
    {
        ot = s->dflag + OT_WORD;
        opsize = 2 << s->dflag;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addl_A0_im(-opsize);
        if (!s->ss32)
            gen_op_andl_A0_ffff();
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);
        if (s->addseg)
            gen_op_addl_A0_seg(R_SS);
        /* push bp */
        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter_level(tcg_const_i32(level),
                                   tcg_const_i32(s->dflag),
                                   cpu_T[1]);
        }
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
    }
}
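
/* raise an exception at cur_eip; the condition codes and eip are synced
   back to the CPU state first */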
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(tcg_const_i32(trapno));
    s->is_jmp = 3;
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_interrupt(tcg_const_i32(intno),
                               tcg_const_i32(next_eip - cur_eip));
    s->is_jmp = 3;
}

static void gen_debug(DisasContext *s, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_debug();
    s->is_jmp = 3;
}

/* generate a generic end of block. Trace exception is also generated
   if needed */
static void gen_eob(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        gen_helper_reset_inhibit_irq();
    }
    if (s->tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf();
    }
    if (s->singlestep_enabled) {
        gen_helper_debug();
    } else if (s->tf) {
        gen_helper_single_step();
    } else {
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = 3;
}

/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    if (s->jmp_opt) {
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = 3;
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}
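
/* helpers moving 64-bit (MMX) and 128-bit (XMM) values between the
   address in A0 and a CPUX86State field at 'offset' */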
static inline void gen_ldq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
}

static inline void gen_ldo_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_sto_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
}

static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
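
/* main MMX/SSE dispatch table, indexed by the opcode byte following
   0x0f; the four columns correspond to the mandatory prefix: none,
   0x66, 0xF3, 0xF2. SSE_SPECIAL entries are decoded by hand in
   gen_sse() below. */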
static void *sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL },  /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { gen_helper_shufps, gen_helper_shufpd },

    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0x70] = { gen_helper_pshufw_mmx,
               gen_helper_pshufd_xmm,
               gen_helper_pshufhw_xmm,
               gen_helper_pshuflw_xmm },
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL },  /* movntq, movntdq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = MMX_OP2(maskmov),
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
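
/* immediate-count MMX/SSE shifts (opcodes 0F 71/72/73): one group of
   eight rows per operand width, indexed by the modrm reg field */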
static void *sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
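
/* scalar integer <-> float conversions; the 64-bit integer variants
   only exist on x86_64 */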
static void *sse_op_table3[4 * 3] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd,
    X86_64_ONLY(gen_helper_cvtsq2ss),
    X86_64_ONLY(gen_helper_cvtsq2sd),

    gen_helper_cvttss2si,
    gen_helper_cvttsd2si,
    X86_64_ONLY(gen_helper_cvttss2sq),
    X86_64_ONLY(gen_helper_cvttsd2sq),

    gen_helper_cvtss2si,
    gen_helper_cvtsd2si,
    X86_64_ONLY(gen_helper_cvtss2sq),
    X86_64_ONLY(gen_helper_cvtsd2sq),
};
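
/* SSE compares for 0F C2: one row per 3-bit immediate predicate */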
static void *sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
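
/* 3DNow! operations, indexed by the instruction's one-byte opcode suffix */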
static void *sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
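
/* SSSE3/SSE4 tables for the 0F 38 and 0F 3A opcode spaces; ext_mask
   holds the CPUID feature bit that must be present */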
struct sse_op_helper_s {
    void *op[2]; uint32_t ext_mask;
};
#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
static struct sse_op_helper_s sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntdqa */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
};

static struct sse_op_helper_s sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
};
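
/* decode and translate one MMX/SSE/3DNow! instruction; 'b' is the
   opcode byte following 0x0f and rex_r extends the modrm reg field */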
3091

    
3092
static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3093
{
3094
    int b1, op1_offset, op2_offset, is_xmm, val, ot;
3095
    int modrm, mod, rm, reg, reg_addr, offset_addr;
3096
    void *sse_op2;
3097

    
3098
    b &= 0xff;
3099
    if (s->prefix & PREFIX_DATA)
3100
        b1 = 1;
3101
    else if (s->prefix & PREFIX_REPZ)
3102
        b1 = 2;
3103
    else if (s->prefix & PREFIX_REPNZ)
3104
        b1 = 3;
3105
    else
3106
        b1 = 0;
3107
    sse_op2 = sse_op_table1[b][b1];
3108
    if (!sse_op2)
3109
        goto illegal_op;
3110
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3111
        is_xmm = 1;
3112
    } else {
3113
        if (b1 == 0) {
3114
            /* MMX case */
3115
            is_xmm = 0;
3116
        } else {
3117
            is_xmm = 1;
3118
        }
3119
    }
3120
    /* simple MMX/SSE operation */
3121
    if (s->flags & HF_TS_MASK) {
3122
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3123
        return;
3124
    }
3125
    if (s->flags & HF_EM_MASK) {
3126
    illegal_op:
3127
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3128
        return;
3129
    }
3130
    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3131
        if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3132
            goto illegal_op;
3133
    if (b == 0x0e) {
3134
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3135
            goto illegal_op;
3136
        /* femms */
3137
        gen_helper_emms();
3138
        return;
3139
    }
3140
    if (b == 0x77) {
3141
        /* emms */
3142
        gen_helper_emms();
3143
        return;
3144
    }
3145
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3146
       the static cpu state) */
3147
    if (!is_xmm) {
3148
        gen_helper_enter_mmx();
3149
    }
3150

    
3151
    modrm = ldub_code(s->pc++);
3152
    reg = ((modrm >> 3) & 7);
3153
    if (is_xmm)
3154
        reg |= rex_r;
3155
    mod = (modrm >> 6) & 3;
3156
    if (sse_op2 == SSE_SPECIAL) {
3157
        b |= (b1 << 8);
3158
        switch(b) {
3159
        case 0x0e7: /* movntq */
3160
            if (mod == 3)
3161
                goto illegal_op;
3162
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3163
            gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3164
            break;
3165
        case 0x1e7: /* movntdq */
3166
        case 0x02b: /* movntps */
3167
        case 0x12b: /* movntps */
3168
        case 0x3f0: /* lddqu */
3169
            if (mod == 3)
3170
                goto illegal_op;
3171
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3172
            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3173
            break;
3174
        case 0x22b: /* movntss */
3175
        case 0x32b: /* movntsd */
3176
            if (mod == 3)
3177
                goto illegal_op;
3178
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3179
            if (b1 & 1) {
3180
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3181
                    xmm_regs[reg]));
3182
            } else {
3183
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3184
                    xmm_regs[reg].XMM_L(0)));
3185
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
3186
            }
3187
            break;
3188
        case 0x6e: /* movd mm, ea */
3189
#ifdef TARGET_X86_64
3190
            if (s->dflag == 2) {
3191
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3192
                tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3193
            } else
3194
#endif
3195
            {
3196
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3197
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 
3198
                                 offsetof(CPUX86State,fpregs[reg].mmx));
3199
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3200
                gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3201
            }
3202
            break;
3203
        case 0x16e: /* movd xmm, ea */
3204
#ifdef TARGET_X86_64
3205
            if (s->dflag == 2) {
3206
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3207
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 
3208
                                 offsetof(CPUX86State,xmm_regs[reg]));
3209
                gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3210
            } else
3211
#endif
3212
            {
3213
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3214
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 
3215
                                 offsetof(CPUX86State,xmm_regs[reg]));
3216
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3217
                gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3218
            }
3219
            break;
3220
        case 0x6f: /* movq mm, ea */
3221
            if (mod != 3) {
3222
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3223
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3224
            } else {
3225
                rm = (modrm & 7);
3226
                tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3227
                               offsetof(CPUX86State,fpregs[rm].mmx));
3228
                tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3229
                               offsetof(CPUX86State,fpregs[reg].mmx));
3230
            }
3231
            break;
3232
        case 0x010: /* movups */
3233
        case 0x110: /* movupd */
3234
        case 0x028: /* movaps */
3235
        case 0x128: /* movapd */
3236
        case 0x16f: /* movdqa xmm, ea */
3237
        case 0x26f: /* movdqu xmm, ea */
3238
            if (mod != 3) {
3239
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3240
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3241
            } else {
3242
                rm = (modrm & 7) | REX_B(s);
3243
                gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3244
                            offsetof(CPUX86State,xmm_regs[rm]));
3245
            }
3246
            break;
3247
        case 0x210: /* movss xmm, ea */
3248
            if (mod != 3) {
3249
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3250
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3251
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3252
                gen_op_movl_T0_0();
3253
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3254
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3255
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3256
            } else {
3257
                rm = (modrm & 7) | REX_B(s);
3258
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3259
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3260
            }
3261
            break;
3262
        case 0x310: /* movsd xmm, ea */
3263
            if (mod != 3) {
3264
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3265
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3266
                gen_op_movl_T0_0();
3267
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3268
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3269
            } else {
3270
                rm = (modrm & 7) | REX_B(s);
3271
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3272
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3273
            }
3274
            break;
        case 0x012: /* movlps */
        case 0x112: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                /* movhlps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x212: /* movsldup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
            break;
        case 0x312: /* movddup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            break;
        case 0x016: /* movhps */
        case 0x116: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                /* movlhps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x216: /* movshdup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            break;
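        /* SSE4a EXTRQ/INSERTQ, immediate forms (66/F2 0F 78): two
           immediate bytes follow the modrm byte, giving the field
           length and the bit index; both are masked to 6 bits below. */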
        case 0x178:
        case 0x378:
            {
                int bit_index, field_length;

                if (b1 == 1 && reg != 0)
                    goto illegal_op;
                field_length = ldub_code(s->pc++) & 0x3F;
                bit_index = ldub_code(s->pc++) & 0x3F;
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                    offsetof(CPUX86State,xmm_regs[reg]));
                if (b1 == 1)
                    gen_helper_extrq_i(cpu_ptr0, tcg_const_i32(bit_index),
                        tcg_const_i32(field_length));
                else
                    gen_helper_insertq_i(cpu_ptr0, tcg_const_i32(bit_index),
                        tcg_const_i32(field_length));
            }
            break;
        case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x27e: /* movq xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x7f: /* movq ea, mm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
                            offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x011: /* movups */
        case 0x111: /* movupd */
        case 0x029: /* movaps */
        case 0x129: /* movapd */
        case 0x17f: /* movdqa ea, xmm */
        case 0x27f: /* movdqu ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
                            offsetof(CPUX86State,xmm_regs[reg]));
            }
            break;
        case 0x211: /* movss ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            }
            break;
        case 0x311: /* movsd ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            }
            break;
        case 0x013: /* movlps */
        case 0x113: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x017: /* movhps */
        case 0x117: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                goto illegal_op;
            }
            break;
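        /* PSRLx/PSRAx/PSLLx with an immediate count: the count is
           materialised in mmx_t0/xmm_t0 and the helper is selected from
           sse_op_table2 by the opcode group and the modrm /reg field. */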
        case 0x71: /* shift mm, im */
        case 0x72:
        case 0x73:
        case 0x171: /* shift xmm, im */
        case 0x172:
        case 0x173:
            val = ldub_code(s->pc++);
            if (is_xmm) {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
                op1_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
                op1_offset = offsetof(CPUX86State,mmx_t0);
            }
            sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
            if (!sse_op2)
                goto illegal_op;
            if (is_xmm) {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        case 0x050: /* movmskps */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x150: /* movmskpd */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x02a: /* cvtpi2ps */
        case 0x12a: /* cvtpi2pd */
            gen_helper_enter_mmx();
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b >> 8) {
            case 0x0:
                gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
                break;
            default:
            case 0x1:
                gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22a: /* cvtsi2ss */
        case 0x32a: /* cvtsi2sd */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
            if (ot == OT_LONG) {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32);
            } else {
                ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]);
            }
            break;
        case 0x02c: /* cvttps2pi */
        case 0x12c: /* cvttpd2pi */
        case 0x02d: /* cvtps2pi */
        case 0x12d: /* cvtpd2pi */
            gen_helper_enter_mmx();
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_ldo_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b) {
            case 0x02c:
                gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
                break;
            case 0x12c:
                gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
                break;
            case 0x02d:
                gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
                break;
            case 0x12d:
                gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
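        /* scalar SSE to integer conversions: sse_op_table3 (declared
           earlier) is indexed here by the destination size (REX.W),
           the ss/sd variant and the truncating (0x2c) vs rounding
           (0x2d) form. */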
        case 0x22c: /* cvttss2si */
        case 0x32c: /* cvttsd2si */
        case 0x22d: /* cvtss2si */
        case 0x32d: /* cvtsd2si */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if ((b >> 8) & 1) {
                    gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                    tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                }
                op2_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
                                    (b & 1) * 4];
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            if (ot == OT_LONG) {
                ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            } else {
                ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0);
            }
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0xc4: /* pinsrw */
        case 0x1c4:
            s->rip_offset = 1;
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            val = ldub_code(s->pc++);
            if (b1) {
                val &= 7;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
            } else {
                val &= 3;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
            }
            break;
        case 0xc5: /* pextrw */
        case 0x1c5:
            if (mod != 3)
                goto illegal_op;
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            val = ldub_code(s->pc++);
            if (b1) {
                val &= 7;
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
            } else {
                val &= 3;
                rm = (modrm & 7);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
            }
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x1d6: /* movq ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x2d6: /* movq2dq */
            gen_helper_enter_mmx();
            rm = (modrm & 7);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                        offsetof(CPUX86State,fpregs[rm].mmx));
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x3d6: /* movdq2q */
            gen_helper_enter_mmx();
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
                        offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            break;
        case 0xd7: /* pmovmskb */
        case 0x1d7:
            if (mod != 3)
                goto illegal_op;
            if (b1) {
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
                gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
            } else {
                rm = (modrm & 7);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
                gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
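        /* three-byte opcode escapes: 0F 38 xx is dispatched through
           sse_op_table6 and 0F 3A xx through sse_op_table7; the second
           opcode byte is fetched below. */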
        case 0x138:
            if (s->prefix & PREFIX_REPNZ)
                goto crc32;
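            /* else fall through: handled like the plain 0F 38 escape */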
        case 0x038:
            b = modrm;
            modrm = ldub_code(s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;

            sse_op2 = sse_op_table6[b].op[b1];
            if (!sse_op2)
                goto illegal_op;
            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
                goto illegal_op;

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    switch (b) {
                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
                        gen_ldq_env_A0(s->mem_index, op2_offset +
                                        offsetof(XMMReg, XMM_Q(0)));
                        break;
                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_L(0)));
                        break;
                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
                        tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_W(0)));
                        break;
                    case 0x2a:            /* movntdqa */
                        gen_ldo_env_A0(s->mem_index, op1_offset);
                        return;
                    default:
                        gen_ldo_env_A0(s->mem_index, op2_offset);
                    }
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            if (sse_op2 == SSE_SPECIAL)
                goto illegal_op;

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);

            if (b == 0x17)
                s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x338: /* crc32 */
        crc32:
            b = modrm;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            if (b != 0xf0 && b != 0xf1)
                goto illegal_op;
            if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
                goto illegal_op;

            if (b == 0xf0) {
                ot = OT_BYTE;
            } else if (b == 0xf1 && s->dflag != 2) {
                if (s->prefix & PREFIX_DATA)
                    ot = OT_WORD;
                else
                    ot = OT_LONG;
            } else {
                ot = OT_QUAD;
            }

            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
                             cpu_T[0], tcg_const_i32(8 << ot));

            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x03a:
        case 0x13a:
            b = modrm;
            modrm = ldub_code(s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;

            sse_op2 = sse_op_table7[b].op[b1];
            if (!sse_op2)
                goto illegal_op;
            if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
                goto illegal_op;

            if (sse_op2 == SSE_SPECIAL) {
                ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3)
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                reg = ((modrm >> 3) & 7) | rex_r;
                val = ldub_code(s->pc++);
                switch (b) {
                case 0x14: /* pextrb */
                    tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x15: /* pextrw */
                    tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_W(val & 7)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x16:
                    if (ot == OT_LONG) { /* pextrd */
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                        else
                            tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                            (s->mem_index >> 2) - 1);
                    } else { /* pextrq */
#ifdef TARGET_X86_64
                        tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
                        else
                            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                            (s->mem_index >> 2) - 1);
#else
                        goto illegal_op;
#endif
                    }
                    break;
                case 0x17: /* extractps */
                    tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_L(val & 3)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x20: /* pinsrb */
                    if (mod == 3)
                        /* the value must end up in cpu_tmp0, which is
                           what is stored into the XMM register below */
                        gen_op_mov_v_reg(OT_LONG, cpu_tmp0, rm);
                    else
                        tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    break;
                case 0x21: /* insertps */
                    if (mod == 3) {
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,xmm_regs[rm]
                                                .XMM_L((val >> 6) & 3)));
                    } else {
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                        (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                    }
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                    offsetof(CPUX86State,xmm_regs[reg]
                                            .XMM_L((val >> 4) & 3)));
                    if ((val >> 0) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(0)));
                    if ((val >> 1) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(1)));
                    if ((val >> 2) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(2)));
                    if ((val >> 3) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(3)));
                    break;
                case 0x22:
                    if (ot == OT_LONG) { /* pinsrd */
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp0, rm);
                        else
                            tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                            (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                    } else { /* pinsrq */
#ifdef TARGET_X86_64
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
                        else
                            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                            (s->mem_index >> 2) - 1);
                        tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
#else
                        goto illegal_op;
#endif
                    }
                    break;
                }
                return;
            }

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            val = ldub_code(s->pc++);

            if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
                s->cc_op = CC_OP_EFLAGS;

                if (s->dflag == 2)
                    /* The helper must use entire 64-bit gp registers */
                    val |= 1 << 8;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        default:
            goto illegal_op;
        }
    } else {
        /* generic MMX or SSE operation */
        switch(b) {
        case 0x70: /* pshufx insn */
        case 0xc6: /* shufps/shufpd */
        case 0xc2: /* compare insns */
            s->rip_offset = 1;
            break;
        default:
            break;
        }
        if (is_xmm) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
                                b == 0xc2)) {
                    /* specific case for SSE single instructions */
                    if (b1 == 2) {
                        /* 32 bit access */
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                    } else {
                        /* 64 bit access */
                        gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
                    }
                } else {
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
        }
        switch(b) {
        case 0x0f: /* 3DNow! data insns */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
                goto illegal_op;
            val = ldub_code(s->pc++);
            sse_op2 = sse_op_table5[val];
            if (!sse_op2)
                goto illegal_op;
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        case 0x70: /* pshufx insn */
        case 0xc6: /* shufps/shufpd */
            val = ldub_code(s->pc++);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        case 0xc2:
            /* compare insns */
            val = ldub_code(s->pc++);
            if (val >= 8)
                goto illegal_op;
            sse_op2 = sse_op_table4[val][b1];
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        case 0xf7:
            /* maskmovq/maskmovdqu: A0 must first be set up with the
               implicit DS:rDI store address */
            if (mod != 3)
                goto illegal_op;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(R_EDI);
            } else
#endif
            {
                gen_op_movl_A0_reg(R_EDI);
                if (s->aflag == 0)
                    gen_op_andl_A0_ffff();
            }
            gen_add_A0_ds_seg(s);

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
            break;
        default:
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        }
        if (b == 0x2e || b == 0x2f) {
            s->cc_op = CC_OP_EFLAGS;
        }
    }
}

/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int shift, ot;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(pc_start);
    s->pc = pc_start;
    prefixes = 0;
    aflag = s->code32;
    dflag = s->code32;
    s->override = -1;
    rex_w = -1;
    rex_r = 0;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
 next_byte:
    b = ldub_code(s->pc);
    s->pc++;
    /* check prefixes */
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        switch (b) {
        case 0xf3:
            prefixes |= PREFIX_REPZ;
            goto next_byte;
        case 0xf2:
            prefixes |= PREFIX_REPNZ;
            goto next_byte;
        case 0xf0:
            prefixes |= PREFIX_LOCK;
            goto next_byte;
        case 0x2e:
            s->override = R_CS;
            goto next_byte;
        case 0x36:
            s->override = R_SS;
            goto next_byte;
        case 0x3e:
            s->override = R_DS;
            goto next_byte;
        case 0x26:
            s->override = R_ES;
            goto next_byte;
        case 0x64:
            s->override = R_FS;
            goto next_byte;
        case 0x65:
            s->override = R_GS;
            goto next_byte;
        case 0x66:
            prefixes |= PREFIX_DATA;
            goto next_byte;
        case 0x67:
            prefixes |= PREFIX_ADR;
            goto next_byte;
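        /* REX prefix: the R/X/B bits are pre-shifted to bit 3 so they
           can be OR-ed directly into the modrm reg, SIB index and
           base/rm register numbers. */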
        case 0x40 ... 0x4f:
            /* REX prefix */
            rex_w = (b >> 3) & 1;
            rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            REX_B(s) = (b & 0x1) << 3;
            x86_64_hregs = 1; /* select uniform byte register addressing */
            goto next_byte;
        }
        if (rex_w == 1) {
            /* 0x66 is ignored if rex.w is set */
            dflag = 2;
        } else {
            if (prefixes & PREFIX_DATA)
                dflag ^= 1;
        }
        if (!(prefixes & PREFIX_ADR))
            aflag = 2;
    } else
#endif
    {
        switch (b) {
        case 0xf3:
            prefixes |= PREFIX_REPZ;
            goto next_byte;
        case 0xf2:
            prefixes |= PREFIX_REPNZ;
            goto next_byte;
        case 0xf0:
            prefixes |= PREFIX_LOCK;
            goto next_byte;
        case 0x2e:
            s->override = R_CS;
            goto next_byte;
        case 0x36:
            s->override = R_SS;
            goto next_byte;
        case 0x3e:
            s->override = R_DS;
            goto next_byte;
        case 0x26:
            s->override = R_ES;
            goto next_byte;
        case 0x64:
            s->override = R_FS;
            goto next_byte;
        case 0x65:
            s->override = R_GS;
            goto next_byte;
        case 0x66:
            prefixes |= PREFIX_DATA;
            goto next_byte;
        case 0x67:
            prefixes |= PREFIX_ADR;
            goto next_byte;
        }
        if (prefixes & PREFIX_DATA)
            dflag ^= 1;
        if (prefixes & PREFIX_ADR)
            aflag ^= 1;
    }
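    /* aflag/dflag now hold the effective address and operand sizes:
       0 = 16 bit, 1 = 32 bit, 2 = 64 bit */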

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;

    /* lock generation */
    if (prefixes & PREFIX_LOCK)
        gen_helper_lock();

    /* now check op code */
 reswitch:
    switch(b) {
    case 0x0f:
        /**************************/
        /* extended op code */
        b = ldub_code(s->pc++) | 0x100;
        goto reswitch;
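        /* 0F-prefixed (two byte) opcodes are dispatched through this
           same switch with 0x100 added, i.e. as case 0x1xx */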

        /**************************/
        /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int op, f, val;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
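            /* f selects the operand form: 0 = Ev,Gv, 1 = Gv,Ev, 2 = A,Iv */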

            switch(f) {
            case 0: /* OP Ev, Gv */
                modrm = ldub_code(s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    gen_op_movl_T0_0();
                    s->cc_op = CC_OP_LOGICB + ot;
                    gen_op_mov_reg_T0(ot, reg);
                    gen_op_update1_cc();
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_TN_reg(ot, 1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = ldub_code(s->pc++);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | rex_r;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_op_ld_T1_A0(ot + s->mem_index);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_TN_reg(ot, 1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(s, ot);
                gen_op_movl_T1_im(val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;

    case 0x82:
        if (CODE64(s))
            goto illegal_op;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x83:
        {
            int val;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83)
                    s->rip_offset = 1;
                else
                    s->rip_offset = insn_const_size(ot);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch(b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(s, OT_BYTE);
                break;
            }
            gen_op_movl_T1_im(val);
            gen_op(s, op, ot, opreg);
        }
        break;

        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0)
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }

        switch(op) {
        case 0: /* test */
            val = insn_get(s, ot);
            gen_op_movl_T1_im(val);
            gen_op_testl_T0_T1_cc();
            s->cc_op = CC_OP_LOGICB + ot;
            break;
        case 2: /* not */
            tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            break;
        case 3: /* neg */
            tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            gen_op_update_neg_cc();
            s->cc_op = CC_OP_SUBB + ot;
            break;
        case 4: /* mul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
#else
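                /* 32-bit target_ulong: widen both operands to 64 bit so
                   the high half of the product can be written to EDX */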
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_extu_i32_i64(t0, cpu_T[0]);
                    tcg_gen_extu_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_mulq_EAX_T0(cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
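                /* cc_src = result - sext(result): non-zero iff the
                   signed product overflows, which sets CF/OF */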
                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                    tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_imulq_EAX_T0(cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 6: /* div */
            switch(ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divb_AL(cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divw_AX(cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divl_EAX(cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divq_EAX(cpu_T[0]);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch(ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivb_AL(cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivw_AX(cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivl_EAX(cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivq_EAX(cpu_T[0]);
                break;
#endif
            }
            break;
        default:
            goto illegal_op;
        }
        break;

    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto illegal_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = OT_QUAD;
            } else if (op == 3 || op == 5) {
                /* for far calls and jumps (lcall/ljmp), the operand is
                   16 or 32 bit, even in long mode */
4596
                ot = dflag ? OT_LONG : OT_WORD;
4597
            } else if (op == 6) {
4598
                /* default push size is 64 bit */
4599
                ot = dflag ? OT_QUAD : OT_WORD;
4600
            }
4601
        }
4602
        if (mod != 3) {
4603
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4604
            if (op >= 2 && op != 3 && op != 5)
4605
                gen_op_ld_T0_A0(ot + s->mem_index);
4606
        } else {
4607
            gen_op_mov_TN_reg(ot, 0, rm);
4608
        }
4609

    
4610
        switch(op) {
4611
        case 0: /* inc Ev */
4612
            if (mod != 3)
4613
                opreg = OR_TMP0;
4614
            else
4615
                opreg = rm;
4616
            gen_inc(s, ot, opreg, 1);
4617
            break;
4618
        case 1: /* dec Ev */
4619
            if (mod != 3)
4620
                opreg = OR_TMP0;
4621
            else
4622
                opreg = rm;
4623
            gen_inc(s, ot, opreg, -1);
4624
            break;
4625
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            next_eip = s->pc - s->cs_base;
            gen_movtl_T1_im(next_eip);
            gen_push_T1(s);
            gen_op_jmp_T0();
            gen_eob(s);
            break;
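        /* Far call: T1 receives the new EIP from memory and T0 the CS
           selector stored just after it.  In protected mode a helper
           performs the descriptor and privilege checks; it gets the
           instruction length (s->pc - pc_start) to compute the return
           address. */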
        case 3: /* lcall Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_lcall:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
                                           tcg_const_i32(dflag),
                                           tcg_const_i32(s->pc - pc_start));
            } else {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
                                      tcg_const_i32(dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            }
            gen_eob(s);
            break;
        case 4: /* jmp Ev */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            gen_op_jmp_T0();
            gen_eob(s);
            break;
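        /* Far jump: in protected mode the new CS is validated by a
           helper; in real/vm86 mode the selector is loaded directly and
           no checks apply. */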
        case 5: /* ljmp Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_ljmp:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
                                          tcg_const_i32(s->pc - pc_start));
            } else {
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_movl_T0_T1();
                gen_op_jmp_T0();
            }
            gen_eob(s);
            break;
        case 6: /* push Ev */
            gen_push_T0(s);
            break;
        default:
            goto illegal_op;
        }
        break;

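    /* TEST computes the AND of its operands purely for the flags; the
       result is discarded.  Setting cc_op to CC_OP_LOGICB + ot lets the
       flags be evaluated lazily from cc_dst. */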
    case 0x84: /* test Ev, Gv */
    case 0x85:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_TN_reg(ot, 1, reg);
        gen_op_testl_T0_T1_cc();
        s->cc_op = CC_OP_LOGICB + ot;
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        val = insn_get(s, ot);

        gen_op_mov_TN_reg(ot, 0, OR_EAX);
        gen_op_movl_T1_im(val);
        gen_op_testl_T0_T1_cc();
        s->cc_op = CC_OP_LOGICB + ot;
        break;

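    /* CBW/CWDE/CDQE: sign-extend the low part of the accumulator into
       the next wider size (AL->AX, AX->EAX, EAX->RAX), selected by the
       effective operand size in dflag. */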
    case 0x98: /* CWDE/CBW */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, R_EAX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, R_EAX);
        } else {
            gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
            tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_WORD, R_EAX);
        }
        break;
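    /* CWD/CDQ/CQO: replicate the accumulator's sign bit into
       DX/EDX/RDX via an arithmetic right shift by width - 1. */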
    case 0x99: /* CDQ/CWD */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
            gen_op_mov_reg_T0(OT_QUAD, R_EDX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
            gen_op_mov_reg_T0(OT_LONG, R_EDX);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
            gen_op_mov_reg_T0(OT_WORD, R_EDX);
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (b == 0x69)
            s->rip_offset = insn_const_size(ot);
        else if (b == 0x6b)
            s->rip_offset = 1;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(s, ot);
            gen_op_movl_T1_im(val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(s, OT_BYTE);
            gen_op_movl_T1_im(val);
        } else {
            gen_op_mov_TN_reg(ot, 1, reg);
        }

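        /* Widening signed multiply.  For the 16/32 bit paths below,
           cc_src is set to product - sign_extend(product truncated to
           the operand size): it is non-zero exactly when the result
           overflowed the destination, which is how CC_OP_MULB + ot
           derives CF and OF later. */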
#ifdef TARGET_X86_64
        if (ot == OT_QUAD) {
            gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else
#endif
        if (ot == OT_LONG) {
#ifdef TARGET_X86_64
                tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                    tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[1], t0);
                    tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
                }
#endif
        } else {
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
        }
        gen_op_mov_reg_T0(ot, reg);
        s->cc_op = CC_OP_MULB + ot;
        break;
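    /* XADD: the destination receives old destination + source while the
       source register receives the old destination value; flags are set
       as for ADD. */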
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_addl_T0_T1();
            gen_op_mov_reg_T1(ot, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_addl_T0_T1();
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T1(ot, reg);
        }
        gen_op_update2_cc();
        s->cc_op = CC_OP_ADDB + ot;
        break;
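    /* CMPXCHG: compare the accumulator with the destination; if equal
       the destination gets the source, otherwise the accumulator gets
       the old destination.  Local temps (tcg_temp_local_new) are used
       because the values must survive the branch to label1.  For the
       memory form the store happens on both paths (writing the old
       value back on failure), mirroring the documented unconditional
       write cycle of the hardware instruction. */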
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            int label1, label2;
            TCGv t0, t1, t2, a0;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
            if (mod == 3) {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(ot + s->mem_index, t0, a0);
                rm = 0; /* avoid warning */
            }
            label1 = gen_new_label();
            tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
            gen_extu(ot, t2);
            tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
            if (mod == 3) {
                label2 = gen_new_label();
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
                gen_set_label(label2);
            } else {
                tcg_gen_mov_tl(t1, t0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                gen_set_label(label1);
                /* always store */
                gen_op_st_v(ot + s->mem_index, t1, a0);
            }
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_dst, t2);
            s->cc_op = CC_OP_SUBB + ot;
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
            tcg_temp_free(a0);
        }
        break;
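    /* CMPXCHG8B/16B takes a memory operand only and requires /1 in the
       modrm reg field.  With REX.W this is CMPXCHG16B, gated on
       CPUID_EXT_CX16; the 8 byte form requires CPUID_CX8.  Both go
       through helpers, so cc_op and EIP are synced first: the helpers
       update EFLAGS and can fault. */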
    case 0x1c7: /* cmpxchg8b */
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
            goto illegal_op;
#ifdef TARGET_X86_64
        if (dflag == 2) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg16b(cpu_A0);
        } else
#endif
        {
            if (!(s->cpuid_features & CPUID_CX8))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg8b(cpu_A0);
        }
        s->cc_op = CC_OP_EFLAGS;
        break;

        /**************************/
        /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));