/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
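/* Set while the instruction being translated carries a REX prefix:
   byte-register encodings 4..7 then select SPL..DIL rather than
   AH..BH (see byte_reg_is_xH below). */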
static int x86_64_hregs;
#endif

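/* Decoder state carried across the translation of one basic block:
   the per-insn fields are refreshed as each instruction is decoded,
   while the block-level fields are derived from the TB flags at entry. */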
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement.  */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op; /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, int ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

/* operand size */
enum {
    OT_BYTE = 0,
    OT_WORD,
    OT_LONG,
    OT_QUAD,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};

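/* Record that the flags are now to be computed as 'op', discarding any
   of the CC_* globals that the new encoding no longer reads so that TCG
   can dead-code-eliminate their writers. */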
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

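/* Flush the translation-time cc_op into the cpu_cc_op global if it has
   gone stale, so that helpers and subsequent TBs see the right value. */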
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}

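/* Store t0 into the low 'ot'-sized piece of integer register 'reg',
   following the x86 rules: 8- and 16-bit writes merge into the existing
   value, while 32-bit writes zero-extend into the full register. */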
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case OT_BYTE:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case OT_WORD:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case OT_LONG:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case OT_QUAD:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    }
}

static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}

static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case OT_BYTE:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case OT_WORD:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
        break;
#endif
    }
}

static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case OT_BYTE:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case OT_WORD:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case OT_BYTE:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case OT_WORD:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
        break;
#endif
    }
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif

static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case OT_BYTE:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case OT_WORD:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case OT_LONG:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case OT_BYTE:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case OT_WORD:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case OT_LONG:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case OT_QUAD:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case OT_BYTE:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case OT_WORD:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case OT_LONG:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case OT_QUAD:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, override);
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, R_ES);
    }
}

static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

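/* Sign- or zero-extend 'src' to the given operand size into 'dst'.
   Returns dst when an extension was emitted, or src unchanged when the
   size already fills the target register, so callers must use the
   returned value rather than assume dst was written. */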
static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
{
    switch (size) {
    case OT_BYTE:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case OT_WORD:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case OT_LONG:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(int ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(int ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case OT_BYTE:
        gen_helper_inb(v, n);
        break;
    case OT_WORD:
        gen_helper_inw(v, n);
        break;
    case OT_LONG:
        gen_helper_inl(v, n);
        break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case OT_BYTE:
        gen_helper_outb(v, n);
        break;
    case OT_WORD:
        gen_helper_outw(v, n);
        break;
    case OT_LONG:
        gen_helper_outl(v, n);
        break;
    }
}

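/* Emit the protection checks for an I/O instruction: the TSS I/O
   permission bitmap check when it applies (protected mode with
   CPL > IOPL, or vm86 mode), then the SVM IOIO intercept check. */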
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case OT_BYTE:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case OT_WORD:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case OT_LONG:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            gen_update_cc_op(s);
            gen_jmp_im(cur_eip);
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

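/* MOVS: copy one element from [seg:SI/ESI] to [ES:DI/EDI], then step
   both index registers by the direction-flag delta loaded into T0. */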
static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}

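/* Describes a condition that has been prepared but not yet materialized:
   the condition is true iff 'cond' holds of (reg & mask) compared against
   reg2 (when use_reg2) or imm.  When no_setcond is set, reg already holds
   the truth value itself and gen_setcc1 must not apply a setcond to it. */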
typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            int size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    default:
        {
            int size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, size, cond;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

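/* Materialize into 'reg' the 0/1 truth value of jcc condition 'b',
   taking the fast paths a prepared condition allows: a plain move for
   no_setcond conditions and a shift+and when testing a single flag bit. */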
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, int l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_ins(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_movl_T0_0();
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (use_icount)
        gen_io_end();
}

static inline void gen_outs(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);

    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (use_icount)
        gen_io_end();
}

/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

static void gen_helper_fp_arith_ST0_FT0(int op) |
1415 |
{ |
1416 |
switch (op) {
|
1417 |
case 0: |
1418 |
gen_helper_fadd_ST0_FT0(cpu_env); |
1419 |
break;
|
1420 |
case 1: |
1421 |
gen_helper_fmul_ST0_FT0(cpu_env); |
1422 |
break;
|
1423 |
case 2: |
1424 |
gen_helper_fcom_ST0_FT0(cpu_env); |
1425 |
break;
|
1426 |
case 3: |
1427 |
gen_helper_fcom_ST0_FT0(cpu_env); |
1428 |
break;
|
1429 |
case 4: |
1430 |
gen_helper_fsub_ST0_FT0(cpu_env); |
1431 |
break;
|
1432 |
case 5: |
1433 |
gen_helper_fsubr_ST0_FT0(cpu_env); |
1434 |
break;
|
1435 |
case 6: |
1436 |
gen_helper_fdiv_ST0_FT0(cpu_env); |
1437 |
break;
|
1438 |
case 7: |
1439 |
gen_helper_fdivr_ST0_FT0(cpu_env); |
1440 |
break;
|
1441 |
} |
1442 |
} |
1443 |
|
1444 |
/* NOTE the exception in "r" op ordering */
|
1445 |
static void gen_helper_fp_arith_STN_ST0(int op, int opreg) |
1446 |
{ |
1447 |
TCGv_i32 tmp = tcg_const_i32(opreg); |
1448 |
switch (op) {
|
1449 |
case 0: |
1450 |
gen_helper_fadd_STN_ST0(cpu_env, tmp); |
1451 |
break;
|
1452 |
case 1: |
1453 |
gen_helper_fmul_STN_ST0(cpu_env, tmp); |
1454 |
break;
|
1455 |
case 4: |
1456 |
gen_helper_fsubr_STN_ST0(cpu_env, tmp); |
1457 |
break;
|
1458 |
case 5: |
1459 |
gen_helper_fsub_STN_ST0(cpu_env, tmp); |
1460 |
break;
|
1461 |
case 6: |
1462 |
gen_helper_fdivr_STN_ST0(cpu_env, tmp); |
1463 |
break;
|
1464 |
case 7: |
1465 |
gen_helper_fdiv_STN_ST0(cpu_env, tmp); |
1466 |
break;
|
1467 |
} |
1468 |
} |
1469 |
|
1470 |
/* if d == OR_TMP0, it means memory operand (address in A0) */
|
1471 |
static void gen_op(DisasContext *s1, int op, int ot, int d) |
1472 |
{ |
1473 |
if (d != OR_TMP0) {
|
1474 |
gen_op_mov_TN_reg(ot, 0, d);
|
1475 |
} else {
|
1476 |
gen_op_ld_T0_A0(ot + s1->mem_index); |
1477 |
} |
1478 |
switch(op) {
|
1479 |
case OP_ADCL:
|
1480 |
gen_compute_eflags_c(s1, cpu_tmp4); |
1481 |
tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1482 |
tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4); |
1483 |
if (d != OR_TMP0)
|
1484 |
gen_op_mov_reg_T0(ot, d); |
1485 |
else
|
1486 |
gen_op_st_T0_A0(ot + s1->mem_index); |
1487 |
gen_op_update3_cc(cpu_tmp4); |
1488 |
set_cc_op(s1, CC_OP_ADCB + ot); |
1489 |
break;
|
1490 |
case OP_SBBL:
|
1491 |
gen_compute_eflags_c(s1, cpu_tmp4); |
1492 |
tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1493 |
tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4); |
1494 |
if (d != OR_TMP0)
|
1495 |
gen_op_mov_reg_T0(ot, d); |
1496 |
else
|
1497 |
gen_op_st_T0_A0(ot + s1->mem_index); |
1498 |
gen_op_update3_cc(cpu_tmp4); |
1499 |
set_cc_op(s1, CC_OP_SBBB + ot); |
1500 |
break;
|
1501 |
case OP_ADDL:
|
1502 |
gen_op_addl_T0_T1(); |
1503 |
if (d != OR_TMP0)
|
1504 |
gen_op_mov_reg_T0(ot, d); |
1505 |
else
|
1506 |
gen_op_st_T0_A0(ot + s1->mem_index); |
1507 |
gen_op_update2_cc(); |
1508 |
set_cc_op(s1, CC_OP_ADDB + ot); |
1509 |
break;
|
1510 |
case OP_SUBL:
|
1511 |
tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
|
1512 |
tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1513 |
if (d != OR_TMP0)
|
1514 |
gen_op_mov_reg_T0(ot, d); |
1515 |
else
|
1516 |
gen_op_st_T0_A0(ot + s1->mem_index); |
1517 |
gen_op_update2_cc(); |
1518 |
set_cc_op(s1, CC_OP_SUBB + ot); |
1519 |
break;
|
1520 |
default:
|
1521 |
case OP_ANDL:
|
1522 |
tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1523 |
if (d != OR_TMP0)
|
1524 |
gen_op_mov_reg_T0(ot, d); |
1525 |
else
|
1526 |
gen_op_st_T0_A0(ot + s1->mem_index); |
1527 |
gen_op_update1_cc(); |
1528 |
set_cc_op(s1, CC_OP_LOGICB + ot); |
1529 |
break;
|
1530 |
case OP_ORL:
|
1531 |
tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1532 |
if (d != OR_TMP0)
|
1533 |
gen_op_mov_reg_T0(ot, d); |
1534 |
else
|
1535 |
gen_op_st_T0_A0(ot + s1->mem_index); |
1536 |
gen_op_update1_cc(); |
1537 |
set_cc_op(s1, CC_OP_LOGICB + ot); |
1538 |
break;
|
1539 |
case OP_XORL:
|
1540 |
tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1541 |
if (d != OR_TMP0)
|
1542 |
gen_op_mov_reg_T0(ot, d); |
1543 |
else
|
1544 |
gen_op_st_T0_A0(ot + s1->mem_index); |
1545 |
gen_op_update1_cc(); |
1546 |
set_cc_op(s1, CC_OP_LOGICB + ot); |
1547 |
break;
|
1548 |
case OP_CMPL:
|
1549 |
tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
|
1550 |
tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
|
1551 |
tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]); |
1552 |
set_cc_op(s1, CC_OP_SUBB + ot); |
1553 |
break;
|
1554 |
} |
1555 |
} |
1556 |
|
1557 |
/* if d == OR_TMP0, it means memory operand (address in A0) */
|
1558 |
static void gen_inc(DisasContext *s1, int ot, int d, int c) |
1559 |
{ |
1560 |
if (d != OR_TMP0)
|
1561 |
gen_op_mov_TN_reg(ot, 0, d);
|
1562 |
else
|
1563 |
gen_op_ld_T0_A0(ot + s1->mem_index); |
1564 |
gen_compute_eflags_c(s1, cpu_cc_src); |
1565 |
if (c > 0) { |
1566 |
tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1); |
1567 |
set_cc_op(s1, CC_OP_INCB + ot); |
1568 |
} else {
|
1569 |
tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1); |
1570 |
set_cc_op(s1, CC_OP_DECB + ot); |
1571 |
} |
1572 |
if (d != OR_TMP0)
|
1573 |
gen_op_mov_reg_T0(ot, d); |
1574 |
else
|
1575 |
gen_op_st_T0_A0(ot + s1->mem_index); |
1576 |
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
|
1577 |
} |
1578 |
|
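/* Commit the flag state after a variable-count shift.  The x86 spec
   leaves EFLAGS unchanged when the count is zero, so the new CC_DST,
   CC_SRC and CC_OP values are selected with movcond against the old
   ones, keyed on count != 0; afterwards CC_OP is only known at runtime. */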
static void gen_shift_flags(DisasContext *s, int ot, TCGv result, TCGv shm1,
                            TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == OT_QUAD ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_reg_T0(ot, op1);
    }

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
}

static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == OT_QUAD ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}

static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}

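/* ROL/ROR with a variable count taken from T1.  8- and 16-bit inputs
   are widened by replication so that a single 32-bit rotate produces
   the correct result for any count. */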
1715 |
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, int is_right) |
1716 |
{ |
1717 |
target_ulong mask = (ot == OT_QUAD ? 0x3f : 0x1f); |
1718 |
TCGv_i32 t0, t1; |
1719 |
|
1720 |
/* load */
|
1721 |
if (op1 == OR_TMP0) {
|
1722 |
gen_op_ld_T0_A0(ot + s->mem_index); |
1723 |
} else {
|
1724 |
gen_op_mov_TN_reg(ot, 0, op1);
|
1725 |
} |
1726 |
|
1727 |
tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask); |
1728 |
|
1729 |
switch (ot) {
|
1730 |
case OT_BYTE:
|
1731 |
/* Replicate the 8-bit input so that a 32-bit rotate works. */
|
1732 |
tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]); |
1733 |
tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101); |
1734 |
goto do_long;
|
1735 |
case OT_WORD:
|
1736 |
/* Replicate the 16-bit input so that a 32-bit rotate works. */
|
1737 |
tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16); |
1738 |
goto do_long;
|
1739 |
do_long:
|
1740 |
#ifdef TARGET_X86_64
|
1741 |
case OT_LONG:
|
1742 |
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
|
1743 |
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
|
1744 |
if (is_right) {
|
1745 |
tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); |
1746 |
} else {
|
1747 |
tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); |
1748 |
} |
1749 |
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
|
1750 |
break;
|
1751 |
#endif
|
1752 |
default:
|
1753 |
if (is_right) {
|
1754 |
tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1755 |
} else {
|
1756 |
tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1757 |
} |
1758 |
break;
|
1759 |
} |
1760 |
|
1761 |
/* store */
|
1762 |
if (op1 == OR_TMP0) {
|
1763 |
gen_op_st_T0_A0(ot + s->mem_index); |
1764 |
} else {
|
1765 |
gen_op_mov_reg_T0(ot, op1); |
1766 |
} |
1767 |
|
1768 |
/* We'll need the flags computed into CC_SRC. */
|
1769 |
gen_compute_eflags(s); |
1770 |
|
1771 |
/* The value that was "rotated out" is now present at the other end
|
1772 |
of the word. Compute C into CC_DST and O into CC_SRC2. Note that
|
1773 |
since we've computed the flags into CC_SRC, these variables are
|
1774 |
currently dead. */
|
1775 |
if (is_right) {
|
1776 |
tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1); |
1777 |
tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
|
1778 |
tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
|
1779 |
} else {
|
1780 |
tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
|
1781 |
tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1); |
1782 |
} |
1783 |
tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
|
1784 |
tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); |
1785 |
|
1786 |
/* Now conditionally store the new CC_OP value. If the shift count
|
1787 |
is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
|
1788 |
Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
|
1789 |
exactly as we computed above. */
|
1790 |
t0 = tcg_const_i32(0);
|
1791 |
t1 = tcg_temp_new_i32(); |
1792 |
tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
|
1793 |
tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX); |
1794 |
tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS); |
1795 |
tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0, |
1796 |
cpu_tmp2_i32, cpu_tmp3_i32); |
1797 |
tcg_temp_free_i32(t0); |
1798 |
tcg_temp_free_i32(t1); |
1799 |
|
1800 |
/* The CC_OP value is no longer predictable. */
|
1801 |
set_cc_op(s, CC_OP_DYNAMIC); |
1802 |
} |
1803 |
|
static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == OT_QUAD ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case OT_LONG:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
            }
            break;
        case OT_BYTE:
            mask = 7;
            goto do_shifts;
        case OT_WORD:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
            tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_reg_T0(ot, op1);
    }

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}

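/* RCL/RCR rotate through the carry flag, so the effective rotate is one
   bit wider than the operand; the generator below defers to helpers
   because a (width + 1)-bit rotate has no direct TCG equivalent.  */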
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch (ot) {
        case OT_BYTE:
            gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case OT_WORD:
            gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case OT_LONG:
            gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case OT_QUAD:
            gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        }
    } else {
        switch (ot) {
        case OT_BYTE:
            gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case OT_WORD:
            gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case OT_LONG:
            gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case OT_QUAD:
            gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        }
    }
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);
}

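/* SHLD/SHRD shift a double-width value formed from two operands.
   Illustrative example (not from the original source): with 16-bit
   operands, "shld A, B, 4" computes (A:B << 4) >> 16, i.e. the top
   four bits of B shift into the low end of A.  The generator below
   widens the operands so a single wide shift can do the work.  */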
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == OT_QUAD ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case OT_WORD:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
            tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
            tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
        } else {
            tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
        }
        /* FALLTHRU */
#ifdef TARGET_X86_64
    case OT_LONG:
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
        } else {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
            tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
            tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        } else {
            tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            if (ot == OT_WORD) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
                tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
            }

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        }
        tcg_gen_movi_tl(cpu_tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
                           cpu_tmp4, cpu_T[1]);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        break;
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_reg_T0(ot, op1);
    }

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
    tcg_temp_free(count);
}

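/* Dispatch a group-2 shift/rotate whose count is in T1.  The OP_*
   values come from the /reg field of the opcode, so e.g. C1 /0 is
   ROL and C1 /5 is SHR.  */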
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}

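/* Decode a ModRM (and optional SIB/displacement) effective address
   into A0.  Illustrative decode (not from the original source): for
   the 32-bit byte sequence 8B 44 9E 08 (mov eax, [esi+ebx*4+8]),
   modrm = 0x44 gives mod = 1, reg = 0, rm = 4; rm == 4 selects a SIB
   byte, 0x9E, giving scale = 2, index = EBX, base = ESI; mod == 1
   then adds the 8-bit displacement 0x08.  */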
static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
                          int *reg_ptr, int *offset_ptr)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int opreg;
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    if (s->aflag) {

        havesib = 0;
        base = rm;
        index = 0;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = cpu_ldub_code(env, s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)cpu_ldl_code(env, s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)cpu_ldl_code(env, s->pc);
            s->pc += 4;
            break;
        }

        if (base >= 0) {
            /* for correct popl handling with esp */
            if (base == 4 && s->popl_esp_hack)
                disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(base);
                if (disp != 0) {
                    gen_op_addq_A0_im(disp);
                }
            } else
#endif
            {
                gen_op_movl_A0_reg(base);
                if (disp != 0)
                    gen_op_addl_A0_im(disp);
            }
        } else {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_im(disp);
            } else
#endif
            {
                gen_op_movl_A0_im(disp);
            }
        }
        /* index == 4 means no index */
        if (havesib && (index != 4)) {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_reg_sN(scale, index);
            } else
#endif
            {
                gen_op_addl_A0_reg_sN(scale, index);
            }
        }
        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP)
                    override = R_SS;
                else
                    override = R_DS;
            }
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_seg(override);
            } else
#endif
            {
                gen_op_addl_A0_seg(s, override);
            }
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = cpu_lduw_code(env, s->pc);
                s->pc += 2;
                gen_op_movl_A0_im(disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = cpu_lduw_code(env, s->pc);
            s->pc += 2;
            break;
        }
        switch(rm) {
        case 0:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 1:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 2:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 3:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 4:
            gen_op_movl_A0_reg(R_ESI);
            break;
        case 5:
            gen_op_movl_A0_reg(R_EDI);
            break;
        case 6:
            gen_op_movl_A0_reg(R_EBP);
            break;
        default:
        case 7:
            gen_op_movl_A0_reg(R_EBX);
            break;
        }
        if (disp != 0)
            gen_op_addl_A0_im(disp);
        gen_op_andl_A0_ffff();
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6)
                    override = R_SS;
                else
                    override = R_DS;
            }
            gen_op_addl_A0_seg(s, override);
        }
    }

    opreg = OR_A0;
    disp = 0;
    *reg_ptr = opreg;
    *offset_ptr = disp;
}

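/* Advance s->pc over the addressing bytes of a ModRM operand without
   generating any code; used for instructions such as multi-byte NOPs
   that must still consume their SIB and displacement bytes.  */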
static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    if (s->aflag) {

        base = rm;

        if (base == 4) {
            code = cpu_ldub_code(env, s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
    }
}

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(s, override);
        }
    }
}

/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    } else {
        gen_lea_modrm(env, s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
        } else {
            gen_op_ld_T0_A0(ot + s->mem_index);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    }
}

static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot) |
2374 |
{ |
2375 |
uint32_t ret; |
2376 |
|
2377 |
switch(ot) {
|
2378 |
case OT_BYTE:
|
2379 |
ret = cpu_ldub_code(env, s->pc); |
2380 |
s->pc++; |
2381 |
break;
|
2382 |
case OT_WORD:
|
2383 |
ret = cpu_lduw_code(env, s->pc); |
2384 |
s->pc += 2;
|
2385 |
break;
|
2386 |
default:
|
2387 |
case OT_LONG:
|
2388 |
ret = cpu_ldl_code(env, s->pc); |
2389 |
s->pc += 4;
|
2390 |
break;
|
2391 |
} |
2392 |
return ret;
|
2393 |
} |
2394 |
|
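/* Size in bytes of an immediate for the given operand type.  Immediates
   are at most 32 bits even for OT_QUAD operands, since the 64-bit forms
   sign-extend a 32-bit immediate (MOV r64, imm64 is decoded apart).  */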
static inline int insn_const_size(unsigned int ot)
{
    if (ot <= OT_LONG)
        return 1 << ot;
    else
        return 4;
}

static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

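/* Conditional jump.  When direct block chaining is allowed (jmp_opt),
   both the taken and not-taken paths end in gen_goto_tb so the two
   successor TBs can be linked in place; otherwise fall back to a pair
   of labels and a single end-of-block.  */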
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2;

    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}

static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
                        int modrm, int reg)
{
    CCPrepare cc;

    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

    cc = gen_prepare_cc(s, b, cpu_T[1]);
    if (cc.mask != -1) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
        cc.reg = t0;
    }
    if (!cc.use_reg2) {
        cc.reg2 = tcg_const_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
                       cpu_T[0], cpu_regs[reg]);
    gen_op_mov_reg_T0(ot, reg);

    if (cc.mask != -1) {
        tcg_temp_free(cc.reg);
    }
    if (!cc.use_reg2) {
        tcg_temp_free(cc.reg2);
    }
}

static inline void gen_op_movl_T0_seg(int seg_reg)
{
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State,segs[seg_reg].base));
}

/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
{
    if (s->pe && !s->vm86) {
        /* XXX: optimize by finding processor state dynamically */
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = DISAS_TB_JUMP;
    }
}

static inline int svm_is_rep(int prefixes)
{
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
}

static inline void
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
{
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
        return;
    gen_update_cc_op(s);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
                                         tcg_const_i64(param));
}

static inline void
gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
{
    gen_svm_check_intercept_param(s, pc_start, type, 0);
}

static inline void gen_stack_update(DisasContext *s, int addend)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_add_reg_im(2, R_ESP, addend);
    } else
#endif
    if (s->ss32) {
        gen_op_add_reg_im(1, R_ESP, addend);
    } else {
        gen_op_add_reg_im(0, R_ESP, addend);
    }
}

/* generate a push. It depends on ss32, addseg and dflag */
|
2560 |
static void gen_push_T0(DisasContext *s) |
2561 |
{ |
2562 |
#ifdef TARGET_X86_64
|
2563 |
if (CODE64(s)) {
|
2564 |
gen_op_movq_A0_reg(R_ESP); |
2565 |
if (s->dflag) {
|
2566 |
gen_op_addq_A0_im(-8);
|
2567 |
gen_op_st_T0_A0(OT_QUAD + s->mem_index); |
2568 |
} else {
|
2569 |
gen_op_addq_A0_im(-2);
|
2570 |
gen_op_st_T0_A0(OT_WORD + s->mem_index); |
2571 |
} |
2572 |
gen_op_mov_reg_A0(2, R_ESP);
|
2573 |
} else
|
2574 |
#endif
|
2575 |
{ |
2576 |
gen_op_movl_A0_reg(R_ESP); |
2577 |
if (!s->dflag)
|
2578 |
gen_op_addl_A0_im(-2);
|
2579 |
else
|
2580 |
gen_op_addl_A0_im(-4);
|
2581 |
if (s->ss32) {
|
2582 |
if (s->addseg) {
|
2583 |
tcg_gen_mov_tl(cpu_T[1], cpu_A0);
|
2584 |
gen_op_addl_A0_seg(s, R_SS); |
2585 |
} |
2586 |
} else {
|
2587 |
gen_op_andl_A0_ffff(); |
2588 |
tcg_gen_mov_tl(cpu_T[1], cpu_A0);
|
2589 |
gen_op_addl_A0_seg(s, R_SS); |
2590 |
} |
2591 |
gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
|
2592 |
if (s->ss32 && !s->addseg)
|
2593 |
gen_op_mov_reg_A0(1, R_ESP);
|
2594 |
else
|
2595 |
gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
|
2596 |
} |
2597 |
} |
2598 |
|
/* generate a push. It depends on ss32, addseg and dflag */
/* slower version for T1, only used for call Ev */
static void gen_push_T1(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        if (s->dflag) {
            gen_op_addq_A0_im(-8);
            gen_op_st_T1_A0(OT_QUAD + s->mem_index);
        } else {
            gen_op_addq_A0_im(-2);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        }
        gen_op_mov_reg_A0(2, R_ESP);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (!s->dflag)
            gen_op_addl_A0_im(-2);
        else
            gen_op_addl_A0_im(-4);
        if (s->ss32) {
            if (s->addseg) {
                gen_op_addl_A0_seg(s, R_SS);
            }
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(s, R_SS);
        }
        gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);

        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
        else
            gen_stack_update(s, (-2) << s->dflag);
    }
}

/* two step pop is necessary for precise exceptions */
static void gen_pop_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (s->ss32) {
            if (s->addseg)
                gen_op_addl_A0_seg(s, R_SS);
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(s, R_SS);
        }
        gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
    }
}

static void gen_pop_update(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s) && s->dflag) {
        gen_stack_update(s, 8);
    } else
#endif
    {
        gen_stack_update(s, 2 << s->dflag);
    }
}

static void gen_stack_A0(DisasContext *s) |
2674 |
{ |
2675 |
gen_op_movl_A0_reg(R_ESP); |
2676 |
if (!s->ss32)
|
2677 |
gen_op_andl_A0_ffff(); |
2678 |
tcg_gen_mov_tl(cpu_T[1], cpu_A0);
|
2679 |
if (s->addseg)
|
2680 |
gen_op_addl_A0_seg(s, R_SS); |
2681 |
} |
2682 |
|
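/* Illustrative PUSHA layout (not from the original source): the
   registers end up in the order EAX, ECX, EDX, EBX, original ESP,
   EBP, ESI, EDI from the top of the new stack area downwards, which
   is why the loop below stores register 7 - i while walking the
   addresses upwards.  */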
/* NOTE: wrap around in 16 bit not fully handled */
static void gen_pusha(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-16 << s->dflag);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
    for(i = 0;i < 8; i++) {
        gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
        gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
}

/* NOTE: wrap around in 16 bit not fully handled */
static void gen_popa(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
    for(i = 0;i < 8; i++) {
        /* ESP is not reloaded */
        if (i != 3) {
            gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
            gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
        }
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
}

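/* ENTER builds a stack frame: push EBP, optionally copy 'level' saved
   frame pointers (via a helper), point EBP at the new frame and then
   reserve esp_addend bytes of locals.  Illustrative example (not from
   the original source): "enter 16, 0" behaves like push ebp; mov ebp,
   esp; sub esp, 16.  */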
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    int ot, opsize;

    level &= 0x1f;
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        ot = s->dflag ? OT_QUAD : OT_WORD;
        opsize = 1 << ot;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addq_A0_im(-opsize);
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);

        /* push bp */
        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
                                     tcg_const_i32((ot == OT_QUAD)),
                                     cpu_T[1]);
        }
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_QUAD, R_ESP);
    } else
#endif
    {
        ot = s->dflag + OT_WORD;
        opsize = 2 << s->dflag;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addl_A0_im(-opsize);
        if (!s->ss32)
            gen_op_andl_A0_ffff();
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);
        if (s->addseg)
            gen_op_addl_A0_seg(s, R_SS);
        /* push bp */
        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter_level(cpu_env, tcg_const_i32(level),
                                   tcg_const_i32(s->dflag),
                                   cpu_T[1]);
        }
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
    }
}

static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
    s->is_jmp = DISAS_TB_JUMP;
}

/* an interrupt is different from an exception because of the
|
2787 |
privilege checks */
|
2788 |
static void gen_interrupt(DisasContext *s, int intno, |
2789 |
target_ulong cur_eip, target_ulong next_eip) |
2790 |
{ |
2791 |
gen_update_cc_op(s); |
2792 |
gen_jmp_im(cur_eip); |
2793 |
gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno), |
2794 |
tcg_const_i32(next_eip - cur_eip)); |
2795 |
s->is_jmp = DISAS_TB_JUMP; |
2796 |
} |
2797 |
|
2798 |
static void gen_debug(DisasContext *s, target_ulong cur_eip) |
2799 |
{ |
2800 |
gen_update_cc_op(s); |
2801 |
gen_jmp_im(cur_eip); |
2802 |
gen_helper_debug(cpu_env); |
2803 |
s->is_jmp = DISAS_TB_JUMP; |
2804 |
} |
2805 |
|
/* generate a generic end of block. Trace exception is also generated
   if needed */
static void gen_eob(DisasContext *s)
{
    gen_update_cc_op(s);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        gen_helper_reset_inhibit_irq(cpu_env);
    }
    if (s->tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf(cpu_env);
    }
    if (s->singlestep_enabled) {
        gen_helper_debug(cpu_env);
    } else if (s->tf) {
        gen_helper_single_step(cpu_env);
    } else {
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}

/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);
    if (s->jmp_opt) {
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}

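/* Helpers moving 64-bit and 128-bit values between guest memory at A0
   and fields of CPUX86State (MMX/XMM registers).  The 128-bit forms
   split the access into two 64-bit loads/stores, low quadword first.  */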
static inline void gen_ldq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
}

static inline void gen_ldo_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_sto_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
}

static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

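/* The SSE helper typedefs encode their signature in the name: the
   letters before the underscore are the return type and those after
   are the arguments, with e = env pointer, p = register pointer,
   i = i32, l = i64, t = target-width value and 0 = void.  For example,
   SSEFunc_0_eppi is void (env, reg, reg, i32).  */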
2907 |
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg); |
2908 |
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg); |
2909 |
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val); |
2910 |
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val); |
2911 |
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b); |
2912 |
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, |
2913 |
TCGv_i32 val); |
2914 |
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val); |
2915 |
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, |
2916 |
TCGv val); |
2917 |
|
2918 |
#define SSE_SPECIAL ((void *)1) |
2919 |
#define SSE_DUMMY ((void *)2) |
2920 |
|
2921 |
#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm } |
2922 |
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \ |
2923 |
gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, } |
2924 |
|
2925 |
static const SSEFunc_0_epp sse_op_table1[256][4] = { |
2926 |
/* 3DNow! extensions */
|
2927 |
[0x0e] = { SSE_DUMMY }, /* femms */ |
2928 |
[0x0f] = { SSE_DUMMY }, /* pf... */ |
2929 |
/* pure SSE operations */
|
2930 |
[0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ |
2931 |
[0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ |
2932 |
[0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */ |
2933 |
[0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */ |
2934 |
[0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
|
2935 |
[0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
|
2936 |
[0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */ |
2937 |
[0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */ |
2938 |
|
2939 |
[0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ |
2940 |
[0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ |
2941 |
[0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */ |
2942 |
[0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */ |
2943 |
[0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */ |
2944 |
[0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */ |
2945 |
[0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
|
2946 |
[0x2f] = { gen_helper_comiss, gen_helper_comisd },
|
2947 |
[0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */ |
2948 |
[0x51] = SSE_FOP(sqrt),
|
2949 |
[0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL }, |
2950 |
[0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL }, |
2951 |
[0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */ |
2952 |
[0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */ |
2953 |
[0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */ |
2954 |
[0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */ |
2955 |
[0x58] = SSE_FOP(add),
|
2956 |
[0x59] = SSE_FOP(mul),
|
2957 |
[0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
|
2958 |
gen_helper_cvtss2sd, gen_helper_cvtsd2ss }, |
2959 |
[0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
|
2960 |
[0x5c] = SSE_FOP(sub),
|
2961 |
[0x5d] = SSE_FOP(min),
|
2962 |
[0x5e] = SSE_FOP(div),
|
2963 |
[0x5f] = SSE_FOP(max),
|
2964 |
|
2965 |
[0xc2] = SSE_FOP(cmpeq),
|
2966 |
[0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
|
2967 |
(SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
|
2968 |
|
2969 |
/* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
|
2970 |
[0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
|
2971 |
[0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
|
2972 |
|
2973 |
/* MMX ops and their SSE extensions */
|
2974 |
[0x60] = MMX_OP2(punpcklbw),
|
2975 |
[0x61] = MMX_OP2(punpcklwd),
|
2976 |
[0x62] = MMX_OP2(punpckldq),
|
2977 |
[0x63] = MMX_OP2(packsswb),
|
2978 |
[0x64] = MMX_OP2(pcmpgtb),
|
2979 |
[0x65] = MMX_OP2(pcmpgtw),
|
2980 |
[0x66] = MMX_OP2(pcmpgtl),
|
2981 |
[0x67] = MMX_OP2(packuswb),
|
2982 |
[0x68] = MMX_OP2(punpckhbw),
|
2983 |
[0x69] = MMX_OP2(punpckhwd),
|
2984 |
[0x6a] = MMX_OP2(punpckhdq),
|
2985 |
[0x6b] = MMX_OP2(packssdw),
|
2986 |
[0x6c] = { NULL, gen_helper_punpcklqdq_xmm }, |
2987 |
[0x6d] = { NULL, gen_helper_punpckhqdq_xmm }, |
2988 |
[0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */ |
2989 |
[0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */ |
2990 |
[0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
|
2991 |
(SSEFunc_0_epp)gen_helper_pshufd_xmm, |
2992 |
(SSEFunc_0_epp)gen_helper_pshufhw_xmm, |
2993 |
(SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
|
2994 |
[0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */ |
2995 |
[0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */ |
2996 |
[0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */ |
2997 |
[0x74] = MMX_OP2(pcmpeqb),
|
2998 |
[0x75] = MMX_OP2(pcmpeqw),
|
2999 |
[0x76] = MMX_OP2(pcmpeql),
|
3000 |
[0x77] = { SSE_DUMMY }, /* emms */ |
3001 |
[0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */ |
3002 |
[0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r }, |
3003 |
[0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps }, |
3004 |
[0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps }, |
3005 |
[0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */ |
3006 |
[0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */ |
3007 |
[0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */ |
3008 |
[0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */ |
3009 |
[0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps }, |
3010 |
[0xd1] = MMX_OP2(psrlw),
|
3011 |
[0xd2] = MMX_OP2(psrld),
|
3012 |
[0xd3] = MMX_OP2(psrlq),
|
3013 |
[0xd4] = MMX_OP2(paddq),
|
3014 |
[0xd5] = MMX_OP2(pmullw),
|
3015 |
[0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, |
3016 |
[0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */ |
3017 |
[0xd8] = MMX_OP2(psubusb),
|
3018 |
[0xd9] = MMX_OP2(psubusw),
|
3019 |
[0xda] = MMX_OP2(pminub),
|
3020 |
[0xdb] = MMX_OP2(pand),
|
3021 |
[0xdc] = MMX_OP2(paddusb),
|
3022 |
[0xdd] = MMX_OP2(paddusw),
|
3023 |
[0xde] = MMX_OP2(pmaxub),
|
3024 |
[0xdf] = MMX_OP2(pandn),
|
3025 |
[0xe0] = MMX_OP2(pavgb),
|
3026 |
[0xe1] = MMX_OP2(psraw),
|
3027 |
[0xe2] = MMX_OP2(psrad),
|
3028 |
[0xe3] = MMX_OP2(pavgw),
|
3029 |
[0xe4] = MMX_OP2(pmulhuw),
|
3030 |
[0xe5] = MMX_OP2(pmulhw),
|
3031 |
[0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq }, |
3032 |
[0xe7] = { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntq */ |
3033 |
[0xe8] = MMX_OP2(psubsb),
|
3034 |
[0xe9] = MMX_OP2(psubsw),
|
3035 |
[0xea] = MMX_OP2(pminsw),
|
3036 |
[0xeb] = MMX_OP2(por),
|
3037 |
[0xec] = MMX_OP2(paddsb),
|
3038 |
[0xed] = MMX_OP2(paddsw),
|
3039 |
[0xee] = MMX_OP2(pmaxsw),
|
3040 |
[0xef] = MMX_OP2(pxor),
|
3041 |
[0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */ |
3042 |
[0xf1] = MMX_OP2(psllw),
|
3043 |
[0xf2] = MMX_OP2(pslld),
|
3044 |
[0xf3] = MMX_OP2(psllq),
|
3045 |
[0xf4] = MMX_OP2(pmuludq),
|
3046 |
[0xf5] = MMX_OP2(pmaddwd),
|
3047 |
[0xf6] = MMX_OP2(psadbw),
|
3048 |
[0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
|
3049 |
(SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
|
3050 |
[0xf8] = MMX_OP2(psubb),
|
3051 |
[0xf9] = MMX_OP2(psubw),
|
3052 |
[0xfa] = MMX_OP2(psubl),
|
3053 |
[0xfb] = MMX_OP2(psubq),
|
3054 |
[0xfc] = MMX_OP2(paddb),
|
3055 |
[0xfd] = MMX_OP2(paddw),
|
3056 |
[0xfe] = MMX_OP2(paddl),
|
3057 |
}; |
3058 |
|
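/* Table for the immediate-count shift groups 0F 71/72/73.  Rows are
   8 * (opcode - 0x71) + the /reg field, so e.g. entry [0 + 2] is
   psrlw (0F 71 /2) and [16 + 6] is psllq (0F 73 /6).  */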
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};

static const SSEFunc_0_epi sse_op_table3ai[] = { |
3073 |
gen_helper_cvtsi2ss, |
3074 |
gen_helper_cvtsi2sd |
3075 |
}; |
3076 |
|
3077 |
#ifdef TARGET_X86_64
|
3078 |
static const SSEFunc_0_epl sse_op_table3aq[] = { |
3079 |
gen_helper_cvtsq2ss, |
3080 |
gen_helper_cvtsq2sd |
3081 |
}; |
3082 |
#endif
|
3083 |
|
3084 |
static const SSEFunc_i_ep sse_op_table3bi[] = { |
3085 |
gen_helper_cvttss2si, |
3086 |
gen_helper_cvtss2si, |
3087 |
gen_helper_cvttsd2si, |
3088 |
gen_helper_cvtsd2si |
3089 |
}; |
3090 |
|
3091 |
#ifdef TARGET_X86_64
|
3092 |
static const SSEFunc_l_ep sse_op_table3bq[] = { |
3093 |
gen_helper_cvttss2sq, |
3094 |
gen_helper_cvtss2sq, |
3095 |
gen_helper_cvttsd2sq, |
3096 |
gen_helper_cvtsd2sq |
3097 |
}; |
3098 |
#endif
|
3099 |
|
static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};

static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};

struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }

static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntdqa */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
    [0xdb] = AESNI_OP(aesimc),
    [0xdc] = AESNI_OP(aesenc),
    [0xdd] = AESNI_OP(aesenclast),
    [0xde] = AESNI_OP(aesdec),
    [0xdf] = AESNI_OP(aesdeclast),
};

static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x44] = PCLMULQDQ_OP(pclmulqdq),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
    [0xdf] = AESNI_OP(aeskeygenassist),
};

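/* Top-level MMX/SSE decoder.  The column index b1 selects among the
   mandatory prefixes: 0 = none, 1 = 66, 2 = F3, 3 = F2, which is how
   the four-entry rows of sse_op_table1 above are laid out (e.g. row
   0x10 is movups/movupd/movss/movsd).  */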
3237 |
static void gen_sse(CPUX86State *env, DisasContext *s, int b, |
3238 |
target_ulong pc_start, int rex_r)
|
3239 |
{ |
3240 |
int b1, op1_offset, op2_offset, is_xmm, val, ot;
|
3241 |
int modrm, mod, rm, reg, reg_addr, offset_addr;
|
3242 |
SSEFunc_0_epp sse_fn_epp; |
3243 |
SSEFunc_0_eppi sse_fn_eppi; |
3244 |
SSEFunc_0_ppi sse_fn_ppi; |
3245 |
SSEFunc_0_eppt sse_fn_eppt; |
3246 |
|
3247 |
b &= 0xff;
|
3248 |
if (s->prefix & PREFIX_DATA)
|
3249 |
b1 = 1;
|
3250 |
else if (s->prefix & PREFIX_REPZ) |
3251 |
b1 = 2;
|
3252 |
else if (s->prefix & PREFIX_REPNZ) |
3253 |
b1 = 3;
|
3254 |
else
|
3255 |
b1 = 0;
|
3256 |
sse_fn_epp = sse_op_table1[b][b1]; |
3257 |
if (!sse_fn_epp) {
|
3258 |
goto illegal_op;
|
3259 |
} |
3260 |
if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { |
3261 |
is_xmm = 1;
|
3262 |
} else {
|
3263 |
if (b1 == 0) { |
3264 |
/* MMX case */
|
3265 |
is_xmm = 0;
|
3266 |
} else {
|
3267 |
is_xmm = 1;
|
3268 |
} |
3269 |
} |
3270 |
/* simple MMX/SSE operation */
|
3271 |
if (s->flags & HF_TS_MASK) {
|
3272 |
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); |
3273 |
return;
|
3274 |
} |
3275 |
if (s->flags & HF_EM_MASK) {
|
3276 |
illegal_op:
|
3277 |
gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base); |
3278 |
return;
|
3279 |
} |
3280 |
if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
|
3281 |
if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA)) |
3282 |
goto illegal_op;
|
3283 |
if (b == 0x0e) { |
3284 |
if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
|
3285 |
goto illegal_op;
|
3286 |
/* femms */
|
3287 |
gen_helper_emms(cpu_env); |
3288 |
return;
|
3289 |
} |
3290 |
if (b == 0x77) { |
3291 |
/* emms */
|
3292 |
gen_helper_emms(cpu_env); |
3293 |
return;
|
3294 |
} |
3295 |
/* prepare MMX state (XXX: optimize by storing fptt and fptags in
|
3296 |
the static cpu state) */
|
3297 |
if (!is_xmm) {
|
3298 |
gen_helper_enter_mmx(cpu_env); |
3299 |
} |
3300 |
|
3301 |
modrm = cpu_ldub_code(env, s->pc++); |
3302 |
reg = ((modrm >> 3) & 7); |
3303 |
if (is_xmm)
|
3304 |
reg |= rex_r; |
3305 |
mod = (modrm >> 6) & 3; |
3306 |
if (sse_fn_epp == SSE_SPECIAL) {
|
3307 |
b |= (b1 << 8);
|
3308 |
switch(b) {
|
3309 |
case 0x0e7: /* movntq */ |
3310 |
if (mod == 3) |
3311 |
goto illegal_op;
|
3312 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3313 |
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); |
3314 |
break;
|
3315 |
case 0x1e7: /* movntdq */ |
3316 |
case 0x02b: /* movntps */ |
3317 |
case 0x12b: /* movntps */ |
3318 |
if (mod == 3) |
3319 |
goto illegal_op;
|
3320 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3321 |
gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); |
3322 |
break;
|
3323 |
case 0x3f0: /* lddqu */ |
3324 |
if (mod == 3) |
3325 |
goto illegal_op;
|
3326 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3327 |
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); |
3328 |
break;
|
3329 |
case 0x22b: /* movntss */ |
3330 |
case 0x32b: /* movntsd */ |
3331 |
if (mod == 3) |
3332 |
goto illegal_op;
|
3333 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3334 |
if (b1 & 1) { |
3335 |
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State, |
3336 |
xmm_regs[reg])); |
3337 |
} else {
|
3338 |
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
|
3339 |
xmm_regs[reg].XMM_L(0)));
|
3340 |
gen_op_st_T0_A0(OT_LONG + s->mem_index); |
3341 |
} |
3342 |
break;
|
3343 |
case 0x6e: /* movd mm, ea */ |
3344 |
#ifdef TARGET_X86_64
|
3345 |
if (s->dflag == 2) { |
3346 |
gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
|
3347 |
tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
|
3348 |
} else
|
3349 |
#endif
|
3350 |
{ |
3351 |
gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
|
3352 |
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, |
3353 |
offsetof(CPUX86State,fpregs[reg].mmx)); |
3354 |
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
|
3355 |
gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32); |
3356 |
} |
3357 |
break;
|
3358 |
case 0x16e: /* movd xmm, ea */ |
3359 |
#ifdef TARGET_X86_64
|
3360 |
if (s->dflag == 2) { |
3361 |
gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
|
3362 |
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, |
3363 |
offsetof(CPUX86State,xmm_regs[reg])); |
3364 |
gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
|
3365 |
} else
|
3366 |
#endif
|
3367 |
{ |
3368 |
gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
|
3369 |
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, |
3370 |
offsetof(CPUX86State,xmm_regs[reg])); |
3371 |
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
|
3372 |
gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32); |
3373 |
} |
3374 |
break;
|
3375 |
case 0x6f: /* movq mm, ea */ |
3376 |
if (mod != 3) { |
3377 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3378 |
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); |
3379 |
} else {
|
3380 |
rm = (modrm & 7);
|
3381 |
tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, |
3382 |
offsetof(CPUX86State,fpregs[rm].mmx)); |
3383 |
tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, |
3384 |
offsetof(CPUX86State,fpregs[reg].mmx)); |
3385 |
} |
3386 |
break;
|
3387 |
case 0x010: /* movups */ |
3388 |
case 0x110: /* movupd */ |
3389 |
case 0x028: /* movaps */ |
3390 |
case 0x128: /* movapd */ |
3391 |
case 0x16f: /* movdqa xmm, ea */ |
3392 |
case 0x26f: /* movdqu xmm, ea */ |
3393 |
if (mod != 3) { |
3394 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3395 |
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); |
3396 |
} else {
|
3397 |
rm = (modrm & 7) | REX_B(s);
|
3398 |
gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]), |
3399 |
offsetof(CPUX86State,xmm_regs[rm])); |
3400 |
} |
3401 |
break;
|
3402 |
case 0x210: /* movss xmm, ea */ |
3403 |
if (mod != 3) { |
3404 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3405 |
gen_op_ld_T0_A0(OT_LONG + s->mem_index); |
3406 |
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); |
3407 |
gen_op_movl_T0_0(); |
3408 |
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); |
3409 |
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); |
3410 |
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); |
3411 |
} else {
|
3412 |
rm = (modrm & 7) | REX_B(s);
|
3413 |
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
|
3414 |
offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
|
3415 |
} |
3416 |
break;
|
3417 |
case 0x310: /* movsd xmm, ea */ |
3418 |
if (mod != 3) { |
3419 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3420 |
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
|
3421 |
gen_op_movl_T0_0(); |
3422 |
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); |
3423 |
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); |
3424 |
} else {
|
3425 |
rm = (modrm & 7) | REX_B(s);
|
3426 |
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
|
3427 |
offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
|
3428 |
} |
3429 |
break;
|
3430 |
case 0x012: /* movlps */ |
3431 |
case 0x112: /* movlpd */ |
3432 |
if (mod != 3) { |
3433 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3434 |
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
|
3435 |
} else {
|
3436 |
/* movhlps */
|
3437 |
rm = (modrm & 7) | REX_B(s);
|
3438 |
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
|
3439 |
offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
|
3440 |
} |
3441 |
break;
|
3442 |
case 0x212: /* movsldup */ |
3443 |
if (mod != 3) { |
3444 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3445 |
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); |
3446 |
} else {
|
3447 |
rm = (modrm & 7) | REX_B(s);
|
3448 |
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
|
3449 |
offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
|
3450 |
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
|
3451 |
offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
|
3452 |
} |
3453 |
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
|
3454 |
offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
|
3455 |
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
|
3456 |
offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
|
3457 |
break;
|
3458 |
case 0x312: /* movddup */ |
3459 |
if (mod != 3) { |
3460 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3461 |
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
|
3462 |
} else {
|
3463 |
rm = (modrm & 7) | REX_B(s);
|
3464 |
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
|
3465 |
offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
|
3466 |
} |
3467 |
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
|
3468 |
offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
|
3469 |
break;
|
3470 |
case 0x016: /* movhps */ |
3471 |
case 0x116: /* movhpd */ |
3472 |
if (mod != 3) { |
3473 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3474 |
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
|
3475 |
} else {
|
3476 |
/* movlhps */
|
3477 |
rm = (modrm & 7) | REX_B(s);
|
3478 |
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
|
3479 |
offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
|
3480 |
} |
3481 |
break;
|
3482 |
case 0x216: /* movshdup */ |
3483 |
if (mod != 3) { |
3484 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
3485 |
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); |
3486 |
} else {
|
3487 |
rm = (modrm & 7) | REX_B(s);
|
3488 |
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
|
3489 |
offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
|
3490 |
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
|
3491 |
offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
|
3492 |
} |
3493 |
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
|
3494 |
offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
|
3495 |
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
|
3496 |
offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
|
3497 |
break;
|
3498 |
        case 0x178:
        case 0x378:
            {
                int bit_index, field_length;

                if (b1 == 1 && reg != 0)
                    goto illegal_op;
                field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
                bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                if (b1 == 1)
                    gen_helper_extrq_i(cpu_env, cpu_ptr0,
                                       tcg_const_i32(bit_index),
                                       tcg_const_i32(field_length));
                else
                    gen_helper_insertq_i(cpu_env, cpu_ptr0,
                                         tcg_const_i32(bit_index),
                                         tcg_const_i32(field_length));
            }
            break;
        case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
                gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
                gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x27e: /* movq xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x7f: /* movq ea, mm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
                            offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x011: /* movups */
        case 0x111: /* movupd */
        case 0x029: /* movaps */
        case 0x129: /* movapd */
        case 0x17f: /* movdqa ea, xmm */
        case 0x27f: /* movdqu ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
                            offsetof(CPUX86State,xmm_regs[reg]));
            }
            break;
        case 0x211: /* movss ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            }
            break;
        case 0x311: /* movsd ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            }
            break;
        case 0x013: /* movlps */
        case 0x113: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x017: /* movhps */
        case 0x117: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                goto illegal_op;
            }
            break;
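        /* Shift-by-immediate group: the immediate is staged in xmm_t0 or
           mmx_t0 and the handler is picked from sse_op_table2, indexed by
           the opcode (0x71..0x73) and the reg field of the ModRM byte.  */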
        case 0x71: /* shift mm, im */
        case 0x72:
        case 0x73:
        case 0x171: /* shift xmm, im */
        case 0x172:
        case 0x173:
            if (b1 >= 2) {
                goto illegal_op;
            }
            val = cpu_ldub_code(env, s->pc++);
            if (is_xmm) {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
                op1_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
                op1_offset = offsetof(CPUX86State,mmx_t0);
            }
            sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
                                       (((modrm >> 3)) & 7)][b1];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            if (is_xmm) {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x050: /* movmskps */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x150: /* movmskpd */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x02a: /* cvtpi2ps */
        case 0x12a: /* cvtpi2pd */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b >> 8) {
            case 0x0:
                gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            default:
            case 0x1:
                gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
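        /* cvtsi2ss/sd take a general register or memory source; with
           REX.W (dflag == 2) the source integer is 64-bit.  */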
        case 0x22a: /* cvtsi2ss */
        case 0x32a: /* cvtsi2sd */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            if (ot == OT_LONG) {
                SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
                sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
#else
                goto illegal_op;
#endif
            }
            break;
        case 0x02c: /* cvttps2pi */
        case 0x12c: /* cvttpd2pi */
        case 0x02d: /* cvtps2pi */
        case 0x12d: /* cvtpd2pi */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_ldo_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b) {
            case 0x02c:
                gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12c:
                gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x02d:
                gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12d:
                gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22c: /* cvttss2si */
        case 0x32c: /* cvttsd2si */
        case 0x22d: /* cvtss2si */
        case 0x32d: /* cvtsd2si */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                if ((b >> 8) & 1) {
                    gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                    tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                }
                op2_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            if (ot == OT_LONG) {
                SSEFunc_i_ep sse_fn_i_ep =
                    sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
                sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_l_ep sse_fn_l_ep =
                    sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
                sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
#else
                goto illegal_op;
#endif
            }
            gen_op_mov_reg_T0(ot, reg);
            break;
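        /* pinsrw: the immediate selects the destination word lane,
           0-7 for an XMM register, 0-3 for an MMX register.  */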
        case 0xc4: /* pinsrw */
        case 0x1c4:
            s->rip_offset = 1;
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
            val = cpu_ldub_code(env, s->pc++);
            if (b1) {
                val &= 7;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
            } else {
                val &= 3;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
            }
            break;
        case 0xc5: /* pextrw */
        case 0x1c5:
            if (mod != 3)
                goto illegal_op;
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            val = cpu_ldub_code(env, s->pc++);
            if (b1) {
                val &= 7;
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
            } else {
                val &= 3;
                rm = (modrm & 7);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
            }
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x1d6: /* movq ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x2d6: /* movq2dq */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                        offsetof(CPUX86State,fpregs[rm].mmx));
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x3d6: /* movdq2q */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
                        offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            break;
        case 0xd7: /* pmovmskb */
        case 0x1d7:
            if (mod != 3)
                goto illegal_op;
            if (b1) {
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
                gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            } else {
                rm = (modrm & 7);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
                gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;

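        /* Three-byte opcodes 0f 38 xx dispatch through sse_op_table6,
           except 0f 38 f[0-f], which escape to the integer extensions
           handled at do_0f_38_fx below.  */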
        case 0x138:
        case 0x038:
            b = modrm;
            if ((b & 0xf0) == 0xf0) {
                goto do_0f_38_fx;
            }
            modrm = cpu_ldub_code(env, s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_fn_epp = sse_op_table6[b].op[b1];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
                goto illegal_op;

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    switch (b) {
                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
                        gen_ldq_env_A0(s->mem_index, op2_offset +
                                        offsetof(XMMReg, XMM_Q(0)));
                        break;
                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_L(0)));
                        break;
                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
                        tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_W(0)));
                        break;
                    case 0x2a:            /* movntdqa */
                        gen_ldo_env_A0(s->mem_index, op1_offset);
                        return;
                    default:
                        gen_ldo_env_A0(s->mem_index, op2_offset);
                    }
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            if (sse_fn_epp == SSE_SPECIAL) {
                goto illegal_op;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);

            if (b == 0x17) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            break;

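        /* In the cases below the mandatory prefix is folded into the
           opcode value: 0x0xx = no prefix, 0x1xx = 66, 0x2xx = f3,
           0x3xx = f2.  */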
        case 0x238:
        case 0x338:
        do_0f_38_fx:
            /* Various integer extensions at 0f 38 f[0-f].  */
            b = modrm | (b1 << 8);
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            switch (b) {
            case 0x3f0: /* crc32 Gd,Eb */
            case 0x3f1: /* crc32 Gd,Ey */
            do_crc32:
                if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
                    goto illegal_op;
                }
                if ((b & 0xff) == 0xf0) {
                    ot = OT_BYTE;
                } else if (s->dflag != 2) {
                    ot = (s->prefix & PREFIX_DATA ? OT_WORD : OT_LONG);
                } else {
                    ot = OT_QUAD;
                }

                gen_op_mov_TN_reg(OT_LONG, 0, reg);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
                                 cpu_T[0], tcg_const_i32(8 << ot));

                ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
                gen_op_mov_reg_T0(ot, reg);
                break;

            case 0x1f0: /* crc32 or movbe */
            case 0x1f1:
                /* For these insns, the f3 prefix is supposed to have priority
                   over the 66 prefix, but that's not what we implement above
                   setting b1.  */
                if (s->prefix & PREFIX_REPNZ) {
                    goto do_crc32;
                }
                /* FALLTHRU */
            case 0x0f0: /* movbe Gy,My */
            case 0x0f1: /* movbe My,Gy */
                if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
                    goto illegal_op;
                }
                if (s->dflag != 2) {
                    ot = (s->prefix & PREFIX_DATA ? OT_WORD : OT_LONG);
                } else {
                    ot = OT_QUAD;
                }

                /* Load the data incoming to the bswap.  Note that the TCG
                   implementation of bswap requires the input be zero
                   extended.  In the case of the loads, we simply know that
                   gen_op_ld_v via gen_ldst_modrm does that already.  */
                if ((b & 1) == 0) {
                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                } else {
                    switch (ot) {
                    case OT_WORD:
                        tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[reg]);
                        break;
                    default:
                        tcg_gen_ext32u_tl(cpu_T[0], cpu_regs[reg]);
                        break;
                    case OT_QUAD:
                        tcg_gen_mov_tl(cpu_T[0], cpu_regs[reg]);
                        break;
                    }
                }

                switch (ot) {
                case OT_WORD:
                    tcg_gen_bswap16_tl(cpu_T[0], cpu_T[0]);
                    break;
                default:
                    tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
                    break;
#ifdef TARGET_X86_64
                case OT_QUAD:
                    tcg_gen_bswap64_tl(cpu_T[0], cpu_T[0]);
                    break;
#endif
                }

                if ((b & 1) == 0) {
                    gen_op_mov_reg_T0(ot, reg);
                } else {
                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
                }
                break;

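            /* The BMI1/BMI2 instructions below are VEX-encoded only: each
               requires the VEX prefix with vex_l == 0 in addition to the
               matching CPUID feature bit.  */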
            case 0x0f2: /* andn Gy, By, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
                gen_op_mov_reg_T0(ot, reg);
                gen_op_update1_cc();
                set_cc_op(s, CC_OP_LOGICB + ot);
                break;

            case 0x0f7: /* bextr Gy, Ey, By */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                {
                    TCGv bound, zero;

                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                    /* Extract START, and shift the operand.
                       Shifts larger than operand size get zeros.  */
                    tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
                    tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);

                    bound = tcg_const_tl(ot == OT_QUAD ? 63 : 31);
                    zero = tcg_const_tl(0);
                    tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
                                       cpu_T[0], zero);
                    tcg_temp_free(zero);

                    /* Extract the LEN into a mask.  Lengths larger than
                       operand size get all ones.  */
                    tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
                    tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
                    tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
                                       cpu_A0, bound);
                    tcg_temp_free(bound);
                    tcg_gen_movi_tl(cpu_T[1], 1);
                    tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
                    tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
                    tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);

                    gen_op_mov_reg_T0(ot, reg);
                    gen_op_update1_cc();
                    set_cc_op(s, CC_OP_LOGICB + ot);
                }
                break;

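            /* bzhi zeroes all bits of Ey at positions at or above the
               bit index taken from By[7:0].  */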
            case 0x0f5: /* bzhi Gy, Ey, By */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
                {
                    TCGv bound = tcg_const_tl(ot == OT_QUAD ? 63 : 31);
                    /* Note that since we're using BMILG (in order to get O
                       cleared) we need to store the inverse into C.  */
                    tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
                                       cpu_T[1], bound);
                    tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
                                       bound, bound, cpu_T[1]);
                    tcg_temp_free(bound);
                }
                tcg_gen_movi_tl(cpu_A0, -1);
                tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
                tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
                gen_op_mov_reg_T0(ot, reg);
                gen_op_update1_cc();
                set_cc_op(s, CC_OP_BMILGB + ot);
                break;

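            /* mulx: unsigned widening multiply of Ey by rdx/edx, the low
               half written to By and the high half to Gy; unlike mul, no
               flags are modified.  */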
            case 0x3f6: /* mulx By, Gy, rdx, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                switch (ot) {
                default:
                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
                    tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                      cpu_tmp2_i32, cpu_tmp3_i32);
                    tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
                    tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
                    break;
#ifdef TARGET_X86_64
                case OT_QUAD:
                    tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
                                      cpu_T[0], cpu_regs[R_EDX]);
                    break;
#endif
                }
                break;

            case 0x3f5: /* pdep Gy, By, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                /* Note that by zero-extending the mask operand, we
                   automatically handle zero-extending the result.  */
                if (s->dflag == 2) {
                    tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
                } else {
                    tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
                }
                gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
                break;

            case 0x2f5: /* pext Gy, By, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                /* Note that by zero-extending the mask operand, we
                   automatically handle zero-extending the result.  */
                if (s->dflag == 2) {
                    tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
                } else {
                    tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
                }
                gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
                break;

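            /* adcx/adox add with carry through CF or OF only.  The
               CC_OP_ADCX/ADOX/ADCOX states track which of the two flags
               currently lives in cc_dst/cc_src2, so back-to-back
               adcx/adox chains can reuse the carry without materializing
               EFLAGS.  */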
            case 0x1f6: /* adcx Gy, Ey */
            case 0x2f6: /* adox Gy, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
                    goto illegal_op;
                } else {
                    TCGv carry_in, carry_out, zero;
                    int end_op;

                    ot = (s->dflag == 2 ? OT_QUAD : OT_LONG);
                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

                    /* Re-use the carry-out from a previous round.  */
                    TCGV_UNUSED(carry_in);
                    carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
                    switch (s->cc_op) {
                    case CC_OP_ADCX:
                        if (b == 0x1f6) {
                            carry_in = cpu_cc_dst;
                            end_op = CC_OP_ADCX;
                        } else {
                            end_op = CC_OP_ADCOX;
                        }
                        break;
                    case CC_OP_ADOX:
                        if (b == 0x1f6) {
                            end_op = CC_OP_ADCOX;
                        } else {
                            carry_in = cpu_cc_src2;
                            end_op = CC_OP_ADOX;
                        }
                        break;
                    case CC_OP_ADCOX:
                        end_op = CC_OP_ADCOX;
                        carry_in = carry_out;
                        break;
                    default:
                        end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
                        break;
                    }
                    /* If we can't reuse carry-out, get it out of EFLAGS.  */
                    if (TCGV_IS_UNUSED(carry_in)) {
                        if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
                            gen_compute_eflags(s);
                        }
                        carry_in = cpu_tmp0;
                        tcg_gen_shri_tl(carry_in, cpu_cc_src,
                                        ctz32(b == 0x1f6 ? CC_C : CC_O));
                        tcg_gen_andi_tl(carry_in, carry_in, 1);
                    }

                    switch (ot) {
#ifdef TARGET_X86_64
                    case OT_LONG:
                        /* If we know TL is 64-bit, and we want a 32-bit
                           result, just do everything in 64-bit arithmetic.  */
                        tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
                        tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
                        tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
                        tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
                        tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
                        tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
                        break;
#endif
                    default:
                        /* Otherwise compute the carry-out in two steps.  */
                        zero = tcg_const_tl(0);
                        tcg_gen_add2_tl(cpu_T[0], carry_out,
                                        cpu_T[0], zero,
                                        carry_in, zero);
                        tcg_gen_add2_tl(cpu_regs[reg], carry_out,
                                        cpu_regs[reg], carry_out,
                                        cpu_T[0], zero);
                        tcg_temp_free(zero);
                        break;
                    }
                    set_cc_op(s, end_op);
                }
                break;

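            /* shlx/sarx/shrx: the shift count from By is masked to the
               operand width (63 or 31), matching hardware behaviour, and
               flags are untouched.  */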
            case 0x1f7: /* shlx Gy, Ey, By */
            case 0x2f7: /* sarx Gy, Ey, By */
            case 0x3f7: /* shrx Gy, Ey, By */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = (s->dflag == 2 ? OT_QUAD : OT_LONG);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                if (ot == OT_QUAD) {
                    tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
                } else {
                    tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
                }
                if (b == 0x1f7) {
                    tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                } else if (b == 0x2f7) {
                    if (ot != OT_QUAD) {
                        tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                    }
                    tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                } else {
                    if (ot != OT_QUAD) {
                        tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
                    }
                    tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                }
                gen_op_mov_reg_T0(ot, reg);
                break;

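            /* Group 17: the reg field of the ModRM byte selects the
               operation (1 = blsr, 2 = blsmsk, 3 = blsi).  */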
            case 0x0f3:
            case 0x1f3:
            case 0x2f3:
            case 0x3f3: /* Group 17 */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

                switch (reg & 7) {
                case 1: /* blsr By,Ey */
                    tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
                    tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                    gen_op_mov_reg_T0(ot, s->vex_v);
                    gen_op_update2_cc();
                    set_cc_op(s, CC_OP_BMILGB + ot);
                    break;

                case 2: /* blsmsk By,Ey */
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                    tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
                    tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    set_cc_op(s, CC_OP_BMILGB + ot);
                    break;

                case 3: /* blsi By, Ey */
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                    tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
                    tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    set_cc_op(s, CC_OP_BMILGB + ot);
                    break;

                default:
                    goto illegal_op;
                }
                break;

            default:
                goto illegal_op;
            }
            break;

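        /* Three-byte opcodes 0f 3a xx all take an immediate byte and
           dispatch through sse_op_table7.  */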
        case 0x03a:
        case 0x13a:
            b = modrm;
            modrm = cpu_ldub_code(env, s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_fn_eppi = sse_op_table7[b].op[b1];
            if (!sse_fn_eppi) {
                goto illegal_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
                goto illegal_op;

            if (sse_fn_eppi == SSE_SPECIAL) {
                ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3)
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                reg = ((modrm >> 3) & 7) | rex_r;
                val = cpu_ldub_code(env, s->pc++);
                switch (b) {
                case 0x14: /* pextrb */
                    tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
                                         (s->mem_index >> 2) - 1);
                    break;
                case 0x15: /* pextrw */
                    tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_W(val & 7)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
                                          (s->mem_index >> 2) - 1);
                    break;
                case 0x16:
                    if (ot == OT_LONG) { /* pextrd */
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                        else
                            tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                              (s->mem_index >> 2) - 1);
                    } else { /* pextrq */
#ifdef TARGET_X86_64
                        tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
                        else
                            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                              (s->mem_index >> 2) - 1);
#else
                        goto illegal_op;
#endif
                    }
                    break;
                case 0x17: /* extractps */
                    tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_L(val & 3)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                          (s->mem_index >> 2) - 1);
                    break;
                case 0x20: /* pinsrb */
                    if (mod == 3)
                        gen_op_mov_TN_reg(OT_LONG, 0, rm);
                    else
                        tcg_gen_qemu_ld8u(cpu_T[0], cpu_A0,
                                          (s->mem_index >> 2) - 1);
                    tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    break;
                case 0x21: /* insertps */
                    if (mod == 3) {
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                       offsetof(CPUX86State,xmm_regs[rm]
                                                .XMM_L((val >> 6) & 3)));
                    } else {
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                    }
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                   offsetof(CPUX86State,xmm_regs[reg]
                                            .XMM_L((val >> 4) & 3)));
                    if ((val >> 0) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(0)));
                    if ((val >> 1) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(1)));
                    if ((val >> 2) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(2)));
                    if ((val >> 3) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(3)));
                    break;
                case 0x22:
                    if (ot == OT_LONG) { /* pinsrd */
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp0, rm);
                        else
                            tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                               (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                    } else { /* pinsrq */
#ifdef TARGET_X86_64
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
                        else
                            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                              (s->mem_index >> 2) - 1);
                        tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
#else
                        goto illegal_op;
#endif
                    }
                    break;
                }
                return;
            }

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            val = cpu_ldub_code(env, s->pc++);

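            /* For the explicit-length forms (pcmpestri/pcmpestrm), eax and
               edx supply the string lengths; with REX.W the helper must see
               the full 64-bit registers, signalled in bit 8 of the control
               word.  */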
            if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
                set_cc_op(s, CC_OP_EFLAGS);

                if (s->dflag == 2)
                    /* The helper must use entire 64-bit gp registers */
                    val |= 1 << 8;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;

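        /* 0f 3a f[0-f]: currently only rorx, an immediate rotate that
           does not modify flags.  */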
        case 0x33a:
            /* Various integer extensions at 0f 3a f[0-f].  */
            b = modrm | (b1 << 8);
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            switch (b) {
            case 0x3f0: /* rorx Gy,Ey, Ib */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                b = cpu_ldub_code(env, s->pc++);
                if (ot == OT_QUAD) {
                    tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
                } else {
                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                    tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
                    tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                }
                gen_op_mov_reg_T0(ot, reg);
                break;

            default:
                goto illegal_op;
            }
            break;

        default:
            goto illegal_op;
        }
    } else {
        /* generic MMX or SSE operation */
        switch(b) {
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
        case 0xc2: /* compare insns */
            s->rip_offset = 1;
            break;
        default:
            break;
        }
        if (is_xmm) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
                                b == 0xc2)) {
                    /* specific case for SSE single instructions */
                    if (b1 == 2) {
                        /* 32 bit access */
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                    } else {
                        /* 64 bit access */
                        gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
                    }
                } else {
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
        }
        switch(b) {
        case 0x0f: /* 3DNow! data insns */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
                goto illegal_op;
            val = cpu_ldub_code(env, s->pc++);
            sse_fn_epp = sse_op_table5[val];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
            val = cpu_ldub_code(env, s->pc++);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
            sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        case 0xc2:
            /* compare insns */
            val = cpu_ldub_code(env, s->pc++);
            if (val >= 8)
                goto illegal_op;
            sse_fn_epp = sse_op_table4[val][b1];

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0xf7:
            /* maskmov : we must prepare A0 */
            if (mod != 3)
                goto illegal_op;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(R_EDI);
            } else
#endif
            {
                gen_op_movl_A0_reg(R_EDI);
                if (s->aflag == 0)
                    gen_op_andl_A0_ffff();
            }
            gen_add_A0_ds_seg(s);

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
            sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
            break;
        default:
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        }
        if (b == 0x2e || b == 0x2f) {
            set_cc_op(s, CC_OP_EFLAGS);
        }
    }
}

/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
                               target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int shift, ot;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(pc_start);
    }
    s->pc = pc_start;
    prefixes = 0;
    s->override = -1;
    rex_w = -1;
    rex_r = 0;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
 next_byte:
    b = cpu_ldub_code(env, s->pc);
    s->pc++;
    /* Collect prefixes.  */
    switch (b) {
    case 0xf3:
        prefixes |= PREFIX_REPZ;
        goto next_byte;
    case 0xf2:
        prefixes |= PREFIX_REPNZ;
        goto next_byte;
    case 0xf0:
        prefixes |= PREFIX_LOCK;
        goto next_byte;
    case 0x2e:
        s->override = R_CS;
        goto next_byte;
    case 0x36:
        s->override = R_SS;
        goto next_byte;
    case 0x3e:
        s->override = R_DS;
        goto next_byte;
    case 0x26:
        s->override = R_ES;
        goto next_byte;
    case 0x64:
        s->override = R_FS;
        goto next_byte;
    case 0x65:
        s->override = R_GS;
        goto next_byte;
    case 0x66:
        prefixes |= PREFIX_DATA;
        goto next_byte;
    case 0x67:
        prefixes |= PREFIX_ADR;
        goto next_byte;
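    /* 0x40-0x4f are REX prefixes only in 64-bit mode; otherwise they
       fall out of the prefix loop and decode as inc/dec below.  */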
#ifdef TARGET_X86_64
    case 0x40 ... 0x4f:
        if (CODE64(s)) {
            /* REX prefix */
            rex_w = (b >> 3) & 1;
            rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            REX_B(s) = (b & 0x1) << 3;
            x86_64_hregs = 1; /* select uniform byte register addressing */
            goto next_byte;
        }
        break;
#endif
    case 0xc5: /* 2-byte VEX */
    case 0xc4: /* 3-byte VEX */
        /* VEX prefixes cannot be used except in 32-bit mode.
           Otherwise the instruction is LES or LDS.  */
        if (s->code32 && !s->vm86) {
            static const int pp_prefix[4] = {
                0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
            };
            int vex3, vex2 = cpu_ldub_code(env, s->pc);

            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
                   otherwise the instruction is LES or LDS.  */
                break;
            }
            s->pc++;

            /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
                            | PREFIX_LOCK | PREFIX_DATA)) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (x86_64_hregs) {
                goto illegal_op;
            }
#endif
            rex_r = (~vex2 >> 4) & 8;
            if (b == 0xc5) {
                vex3 = vex2;
                b = cpu_ldub_code(env, s->pc++);
            } else {
#ifdef TARGET_X86_64
                s->rex_x = (~vex2 >> 3) & 8;
                s->rex_b = (~vex2 >> 2) & 8;
#endif
                vex3 = cpu_ldub_code(env, s->pc++);
                rex_w = (vex3 >> 7) & 1;
                switch (vex2 & 0x1f) {
                case 0x01: /* Implied 0f leading opcode bytes.  */
                    b = cpu_ldub_code(env, s->pc++) | 0x100;
                    break;
                case 0x02: /* Implied 0f 38 leading opcode bytes.  */
                    b = 0x138;
                    break;
                case 0x03: /* Implied 0f 3a leading opcode bytes.  */
                    b = 0x13a;
                    break;
                default:   /* Reserved for future use.  */
                    goto illegal_op;
                }
            }
            s->vex_v = (~vex3 >> 3) & 0xf;
            s->vex_l = (vex3 >> 2) & 1;
            prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
        }
        break;
    }

    /* Post-process prefixes.  */
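    /* dflag/aflag encode the operand/address size: 0 = 16-bit,
       1 = 32-bit, 2 = 64-bit.  */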
    if (CODE64(s)) {
        /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
           data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
           over 0x66 if both are present.  */
        dflag = (rex_w > 0 ? 2 : prefixes & PREFIX_DATA ? 0 : 1);
        /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
        aflag = (prefixes & PREFIX_ADR ? 1 : 2);
    } else {
        /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
        dflag = s->code32;
        if (prefixes & PREFIX_DATA) {
            dflag ^= 1;
        }
        /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
        aflag = s->code32;
        if (prefixes & PREFIX_ADR) {
            aflag ^= 1;
        }
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;

    /* lock generation */
    if (prefixes & PREFIX_LOCK)
        gen_helper_lock();

    /* now check op code */
 reswitch:
    switch(b) {
    case 0x0f:
        /**************************/
        /* extended op code */
        b = cpu_ldub_code(env, s->pc++) | 0x100;
        goto reswitch;

        /**************************/
        /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int op, f, val;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            switch(f) {
            case 0: /* OP Ev, Gv */
                modrm = cpu_ldub_code(env, s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    set_cc_op(s, CC_OP_CLR);
                    gen_op_movl_T0_0();
                    gen_op_mov_reg_T0(ot, reg);
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_TN_reg(ot, 1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = cpu_ldub_code(env, s->pc++);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | rex_r;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_op_ld_T1_A0(ot + s->mem_index);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_TN_reg(ot, 1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(env, s, ot);
                gen_op_movl_T1_im(val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;

    case 0x82:
        if (CODE64(s))
            goto illegal_op;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x83:
        {
            int val;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            modrm = cpu_ldub_code(env, s->pc++);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83)
                    s->rip_offset = 1;
                else
                    s->rip_offset = insn_const_size(ot);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch(b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(env, s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(env, s, OT_BYTE);
                break;
            }
            gen_op_movl_T1_im(val);
            gen_op(s, op, ot, opreg);
        }
        break;

        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
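    /* GRP3: the reg field of the ModRM byte selects the operation
       (0 = test, 2 = not, 3 = neg, 4 = mul, 5 = imul, 6 = div,
       7 = idiv).  */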
    case 0xf6: /* GRP3 */
    case 0xf7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0)
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }

        switch(op) {
        case 0: /* test */
            val = insn_get(env, s, ot);
            gen_op_movl_T1_im(val);
            gen_op_testl_T0_T1_cc();
            set_cc_op(s, CC_OP_LOGICB + ot);
            break;
        case 2: /* not */
            tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            break;
        case 3: /* neg */
            tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            gen_op_update_neg_cc();
            set_cc_op(s, CC_OP_SUBB + ot);
            break;
        case 4: /* mul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
                set_cc_op(s, CC_OP_MULB);
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case OT_LONG:
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                  cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  cpu_T[0], cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
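        /* For imul, CF/OF are set when the result does not fit the
           destination width: cc_src is computed as the full result minus
           its sign-extended low half, so it is nonzero exactly on
           overflow.  */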
        case 5: /* imul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                set_cc_op(s, CC_OP_MULB);
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case OT_LONG:
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                  cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
                tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  cpu_T[0], cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
5110 |
switch(ot) {
|
5111 |
case OT_BYTE:
|
5112 |
gen_jmp_im(pc_start - s->cs_base); |
5113 |
gen_helper_divb_AL(cpu_env, cpu_T[0]);
|
5114 |
break;
|
5115 |
case OT_WORD:
|
5116 |
gen_jmp_im(pc_start - s->cs_base); |
5117 |
gen_helper_divw_AX(cpu_env, cpu_T[0]);
|
5118 |
break;
|
5119 |
default:
|
5120 |
case OT_LONG:
|
5121 |
gen_jmp_im(pc_start - s->cs_base); |
5122 |
gen_helper_divl_EAX(cpu_env, cpu_T[0]);
|
5123 |
break;
|
5124 |
#ifdef TARGET_X86_64
|
5125 |
case OT_QUAD:
|
5126 |
gen_jmp_im(pc_start - s->cs_base); |
5127 |
gen_helper_divq_EAX(cpu_env, cpu_T[0]);
|
5128 |
break;
|
5129 |
#endif
|
5130 |
} |
5131 |
break;
|
5132 |
case 7: /* idiv */ |
5133 |
switch(ot) {
|
5134 |
case OT_BYTE:
|
5135 |
gen_jmp_im(pc_start - s->cs_base); |
5136 |
gen_helper_idivb_AL(cpu_env, cpu_T[0]);
|
5137 |
break;
|
5138 |
case OT_WORD:
|
5139 |
gen_jmp_im(pc_start - s->cs_base); |
5140 |
gen_helper_idivw_AX(cpu_env, cpu_T[0]);
|
5141 |
break;
|
5142 |
default:
|
5143 |
case OT_LONG:
|
5144 |
gen_jmp_im(pc_start - s->cs_base); |
5145 |
gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
|
5146 |
break;
|
5147 |
#ifdef TARGET_X86_64
|
5148 |
case OT_QUAD:
|
5149 |
gen_jmp_im(pc_start - s->cs_base); |
5150 |
gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
|
5151 |
break;
|
5152 |
#endif
|
5153 |
} |
5154 |
break;
|
5155 |
default:
|
5156 |
goto illegal_op;
|
5157 |
} |
5158 |
break;
|
5159 |
|
5160 |
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto illegal_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = OT_QUAD;
            } else if (op == 3 || op == 5) {
                ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = dflag ? OT_QUAD : OT_WORD;
            }
        }
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }

        switch(op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            next_eip = s->pc - s->cs_base;
            gen_movtl_T1_im(next_eip);
            gen_push_T1(s);
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 3: /* lcall Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_lcall:
            if (s->pe && !s->vm86) {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                           tcg_const_i32(dflag),
                                           tcg_const_i32(s->pc - pc_start));
            } else {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                      tcg_const_i32(dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            }
            gen_eob(s);
            break;
        case 4: /* jmp Ev */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 5: /* ljmp Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_ljmp:
            if (s->pe && !s->vm86) {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                          tcg_const_i32(s->pc - pc_start));
            } else {
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_movl_T0_T1();
                gen_op_jmp_T0();
            }
            gen_eob(s);
            break;
        case 6: /* push Ev */
            gen_push_T0(s);
            break;
        default:
            goto illegal_op;
        }
        break;

    case 0x84: /* test Ev, Gv */
    case 0x85:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_TN_reg(ot, 1, reg);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        val = insn_get(env, s, ot);

        gen_op_mov_TN_reg(ot, 0, OR_EAX);
        gen_op_movl_T1_im(val);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0x98: /* CWDE/CBW */
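        /* sign-extend AL into AX, AX into EAX, or EAX into RAX,
           depending on the operand size */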
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, R_EAX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, R_EAX);
        } else {
            gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
            tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_WORD, R_EAX);
        }
        break;
    case 0x99: /* CDQ/CWD */
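        /* copy the sign of rAX into every bit of rDX
           (AX -> DX:AX, EAX -> EDX:EAX, RAX -> RDX:RAX) */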
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
            gen_op_mov_reg_T0(OT_QUAD, R_EDX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
            gen_op_mov_reg_T0(OT_LONG, R_EDX);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
            gen_op_mov_reg_T0(OT_WORD, R_EDX);
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (b == 0x69)
            s->rip_offset = insn_const_size(ot);
        else if (b == 0x6b)
            s->rip_offset = 1;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(env, s, ot);
            gen_op_movl_T1_im(val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, OT_BYTE);
            gen_op_movl_T1_im(val);
        } else {
            gen_op_mov_TN_reg(ot, 1, reg);
        }
        switch (ot) {
#ifdef TARGET_X86_64
        case OT_QUAD:
            tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
            break;
#endif
        case OT_LONG:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
            tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                              cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
            tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
            break;
        default:
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
            gen_op_mov_reg_T0(ot, reg);
            break;
        }
        set_cc_op(s, CC_OP_MULB + ot);
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_addl_T0_T1();
            gen_op_mov_reg_T1(ot, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_addl_T0_T1();
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T1(ot, reg);
        }
        gen_op_update2_cc();
        set_cc_op(s, CC_OP_ADDB + ot);
        break;
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            int label1, label2;
            TCGv t0, t1, t2, a0;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
            if (mod == 3) {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
            } else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(ot + s->mem_index, t0, a0);
                rm = 0; /* avoid warning */
            }
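            /* t0 = destination, t1 = source (Gv), t2 = rAX: when rAX equals
               the destination, branch to label1 and store the source into
               it; otherwise fall through and load the old destination
               value into rAX */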
            label1 = gen_new_label();
            tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
            gen_extu(ot, t0);
            gen_extu(ot, t2);
            tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
            label2 = gen_new_label();
            if (mod == 3) {
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
            } else {
                /* perform no-op store cycle like physical cpu; must be
                   before changing accumulator to ensure idempotency if
                   the store faults and the instruction is restarted */
                gen_op_st_v(ot + s->mem_index, t0, a0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_st_v(ot + s->mem_index, t1, a0);
            }
            gen_set_label(label2);
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_srcT, t2);
            tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
            set_cc_op(s, CC_OP_SUBB + ot);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
            tcg_temp_free(a0);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
            goto illegal_op;
#ifdef TARGET_X86_64
        if (dflag == 2) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg16b(cpu_env, cpu_A0);
        } else
#endif
        {
            if (!(s->cpuid_features & CPUID_CX8))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg8b(cpu_env, cpu_A0);
        }
        set_cc_op(s, CC_OP_EFLAGS);
        break;

    /**************************/
    /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
        gen_push_T0(s);
        break;
    case 0x58 ... 0x5f: /* pop */
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s);
        gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
        break;
    case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s))
            goto illegal_op;
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        if (b == 0x68)
            val = insn_get(env, s, ot);
        else
            val = (int8_t)insn_get(env, s, OT_BYTE);
        gen_op_movl_T0_im(val);
        gen_push_T0(s);
        break;
    case 0x8f: /* pop Ev */
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            /* NOTE: order is important too for MMU exceptions */
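            /* an ESP-relative destination must be addressed with the value
               ESP has after the pop: popl_esp_hack makes gen_lea_modrm
               offset ESP-based addresses by the operand size */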
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s);
        }
        break;
    case 0xc8: /* enter */
        {
            int level;
            val = cpu_lduw_code(env, s->pc);
            s->pc += 2;
            level = cpu_ldub_code(env, s->pc++);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        /* XXX: exception not precise (ESP is updated before potential exception) */
        if (CODE64(s)) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_QUAD, R_ESP);
        } else if (s->ss32) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
            gen_op_mov_reg_T0(OT_LONG, R_ESP);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_WORD, R_ESP);
        }
        gen_pop_T0(s);
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        gen_op_mov_reg_T0(ot, R_EBP);
        gen_pop_update(s);
        break;
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(b >> 3);
        gen_push_T0(s);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg((b >> 3) & 7);
        gen_push_T0(s);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
        gen_pop_T0(s);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        gen_pop_update(s);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace. */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
        gen_pop_update(s);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;

    /**************************/
    /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        }
        val = insn_get(env, s, ot);
        gen_op_movl_T0_im(val);
        if (mod != 3)
            gen_op_st_T0_A0(ot + s->mem_index);
        else
            gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = OT_WORD + dflag;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_T0(ot, reg);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(reg);
        if (mod == 3)
            ot = OT_WORD + dflag;
        else
            ot = OT_WORD;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        break;

    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;
            /* ot is the size of source */
            ot = (b & 1) + OT_BYTE;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_TN_reg(ot, 0, rm);
                switch(ot | (b & 8)) {
                case OT_BYTE:
                    tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_BYTE | 8:
                    tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_WORD:
                    tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                    break;
                default:
                case OT_WORD | 8:
                    tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                    break;
                }
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                if (b & 8) {
                    gen_op_lds_T0_A0(ot + s->mem_index);
                } else {
                    gen_op_ldu_T0_A0(ot + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        }
        break;

    case 0x8d: /* lea */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* we must ensure that no segment is added */
        s->override = -1;
        val = s->addseg;
        s->addseg = 0;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        s->addseg = val;
        gen_op_mov_reg_A0(ot - OT_WORD, reg);
        break;

    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                offset_addr = cpu_ldq_code(env, s->pc);
                s->pc += 8;
                gen_op_movq_A0_im(offset_addr);
            } else
#endif
            {
                if (s->aflag) {
                    offset_addr = insn_get(env, s, OT_LONG);
                } else {
                    offset_addr = insn_get(env, s, OT_WORD);
                }
                gen_op_movl_A0_im(offset_addr);
            }
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_T0_A0(ot + s->mem_index);
                gen_op_mov_reg_T0(ot, R_EAX);
            } else {
                gen_op_mov_TN_reg(ot, 0, R_EAX);
                gen_op_st_T0_A0(ot + s->mem_index);
            }
        }
        break;
    case 0xd7: /* xlat */
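        /* AL = [seg:rBX + unsigned AL], with the address truncated to the
           current address size */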
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            gen_op_movq_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
        } else
#endif
        {
            gen_op_movl_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
            if (s->aflag == 0)
                gen_op_andl_A0_ffff();
            else
                tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        }
        gen_add_A0_ds_seg(s);
        gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
        break;
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, OT_BYTE);
        gen_op_movl_T0_im(val);
        gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = cpu_ldq_code(env, s->pc);
            s->pc += 8;
            reg = (b & 7) | REX_B(s);
            gen_movtl_T0_im(tmp);
            gen_op_mov_reg_T0(OT_QUAD, reg);
        } else
#endif
        {
            ot = dflag ? OT_LONG : OT_WORD;
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            gen_op_movl_T0_im(val);
            gen_op_mov_reg_T0(ot, reg);
        }
        break;

    case 0x91 ... 0x97: /* xchg R, EAX */
    do_xchg_reg_eax:
        ot = dflag + OT_WORD;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_mov_reg_T0(ot, rm);
            gen_op_mov_reg_T1(ot, reg);
        } else {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            /* for xchg, lock is implicit */
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_lock();
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_st_T0_A0(ot + s->mem_index);
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_unlock();
            gen_op_mov_reg_T1(ot, reg);
        }
        break;
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above.  */
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above.  */
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        gen_op_ld_T1_A0(ot + s->mem_index);
        gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
        /* load the segment first to handle exceptions properly */
        gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        gen_movl_seg_T0(s, op, pc_start - s->cs_base);
        /* then put the data */
        gen_op_mov_reg_T1(ot, reg);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;

    /************************/
    /* shifts */
    case 0xc0:
    case 0xc1:
        /* shift Ev,Ib */
        shift = 2;
    grp2:
        {
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            modrm = cpu_ldub_code(env, s->pc++);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (shift == 2) {
                    s->rip_offset = 1;
                }
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }

            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2) {
                    shift = cpu_ldub_code(env, s->pc++);
                }
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
    case 0xd0:
    case 0xd1:
        /* shift Ev,1 */
        shift = 1;
        goto grp2;
    case 0xd2:
    case 0xd3:
        /* shift Ev,cl */
        shift = 0;
        goto grp2;

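    /* SHLD/SHRD double shifts: 'op' selects the direction (0 = left,
       1 = right) and 'shift' the count source (1 = imm8, 0 = CL);
       the bits shifted in come from T1 (Gv) */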
    case 0x1a4: /* shld imm */
        op = 0;
        shift = 1;
        goto do_shiftd;
    case 0x1a5: /* shld cl */
        op = 0;
        shift = 0;
        goto do_shiftd;
    case 0x1ac: /* shrd imm */
        op = 1;
        shift = 1;
        goto do_shiftd;
    case 0x1ad: /* shrd cl */
        op = 1;
        shift = 0;
    do_shiftd:
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_TN_reg(ot, 1, reg);

        if (shift) {
            TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
            tcg_temp_free(imm);
        } else {
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
        }
        break;

    /************************/
    /* floats */
    case 0xd8 ... 0xdf:
        if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
            /* if CR0.EM or CR0.TS are set, generate an FPU exception */
            /* XXX: what to do if illegal op ? */
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            break;
        }
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = modrm & 7;
        op = ((b & 7) << 3) | ((modrm >> 3) & 7);
        if (mod != 3) {
            /* memory op */
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            switch(op) {
            case 0x00 ... 0x07: /* fxxxs */
            case 0x10 ... 0x17: /* fixxxl */
            case 0x20 ... 0x27: /* fxxxl */
            case 0x30 ... 0x37: /* fixxx */
                {
                    int op1;
                    op1 = op & 7;

                    switch(op >> 4) {
                    case 0:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 1:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 2:
                        tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
                        break;
                    case 3:
                    default:
                        gen_op_lds_T0_A0(OT_WORD + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    }

                    gen_helper_fp_arith_ST0_FT0(op1);
                    if (op1 == 3) {
                        /* fcomp needs pop */
                        gen_helper_fpop(cpu_env);
                    }
                }
                break;
            case 0x08: /* flds */
            case 0x0a: /* fsts */
            case 0x0b: /* fstps */
            case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
            case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
            case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                switch(op & 7) {
                case 0:
                    switch(op >> 4) {
                    case 0:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 1:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 2:
                        tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
                        break;
                    case 3:
                    default:
                        gen_op_lds_T0_A0(OT_WORD + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    }
                    break;
                case 1:
                    /* XXX: the corresponding CPUID bit must be tested ! */
                    switch(op >> 4) {
                    case 1:
                        gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        break;
                    case 2:
                        gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
                        tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        break;
                    case 3:
                    default:
                        gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_WORD + s->mem_index);
                        break;
                    }
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    switch(op >> 4) {
                    case 0:
                        gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        break;
                    case 1:
                        gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        break;
                    case 2:
                        gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
                        tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        break;
                    case 3:
                    default:
                        gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_WORD + s->mem_index);
                        break;
                    }
                    if ((op & 7) == 3)
                        gen_helper_fpop(cpu_env);
                    break;
                }
                break;
            case 0x0c: /* fldenv mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x0d: /* fldcw mem */
                gen_op_ld_T0_A0(OT_WORD + s->mem_index);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
                break;
            case 0x0e: /* fnstenv mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x0f: /* fnstcw mem */
                gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                break;
            case 0x1d: /* fldt mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fldt_ST0(cpu_env, cpu_A0);
                break;
            case 0x1f: /* fstpt mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fstt_ST0(cpu_env, cpu_A0);
                gen_helper_fpop(cpu_env);
                break;
            case 0x2c: /* frstor mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x2e: /* fnsave mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x2f: /* fnstsw mem */
                gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                break;
            case 0x3c: /* fbld */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fbld_ST0(cpu_env, cpu_A0);
                break;
            case 0x3e: /* fbstp */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fbst_ST0(cpu_env, cpu_A0);
                gen_helper_fpop(cpu_env);
                break;
            case 0x3d: /* fildll */
                tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                  (s->mem_index >> 2) - 1);
                gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
                break;
            case 0x3f: /* fistpll */
                gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
                tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                  (s->mem_index >> 2) - 1);
                gen_helper_fpop(cpu_env);
                break;
            default:
                goto illegal_op;
            }
        } else {
            /* register float ops */
            opreg = rm;

            switch(op) {
            case 0x08: /* fld sti */
                gen_helper_fpush(cpu_env);
                gen_helper_fmov_ST0_STN(cpu_env,
                                        tcg_const_i32((opreg + 1) & 7));
                break;
            case 0x09: /* fxchg sti */
            case 0x29: /* fxchg4 sti, undocumented op */
            case 0x39: /* fxchg7 sti, undocumented op */
                gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x0a: /* grp d9/2 */
                switch(rm) {
                case 0: /* fnop */
                    /* check exceptions (FreeBSD FPU probe) */
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fwait(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x0c: /* grp d9/4 */
                switch(rm) {
                case 0: /* fchs */
                    gen_helper_fchs_ST0(cpu_env);
                    break;
                case 1: /* fabs */
                    gen_helper_fabs_ST0(cpu_env);
                    break;
                case 4: /* ftst */
                    gen_helper_fldz_FT0(cpu_env);
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    break;
                case 5: /* fxam */
                    gen_helper_fxam_ST0(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x0d: /* grp d9/5 */
                {
                    switch(rm) {
                    case 0:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fld1_ST0(cpu_env);
                        break;
                    case 1:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldl2t_ST0(cpu_env);
                        break;
                    case 2:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldl2e_ST0(cpu_env);
                        break;
                    case 3:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldpi_ST0(cpu_env);
                        break;
                    case 4:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldlg2_ST0(cpu_env);
                        break;
                    case 5:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldln2_ST0(cpu_env);
                        break;
                    case 6:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldz_ST0(cpu_env);
                        break;
                    default:
                        goto illegal_op;
                    }
                }
                break;
            case 0x0e: /* grp d9/6 */
                switch(rm) {
                case 0: /* f2xm1 */
                    gen_helper_f2xm1(cpu_env);
                    break;
                case 1: /* fyl2x */
                    gen_helper_fyl2x(cpu_env);
                    break;
                case 2: /* fptan */
                    gen_helper_fptan(cpu_env);
                    break;
                case 3: /* fpatan */
                    gen_helper_fpatan(cpu_env);
                    break;
                case 4: /* fxtract */
                    gen_helper_fxtract(cpu_env);
                    break;
                case 5: /* fprem1 */
                    gen_helper_fprem1(cpu_env);
                    break;
                case 6: /* fdecstp */
                    gen_helper_fdecstp(cpu_env);
                    break;
                default:
                case 7: /* fincstp */
                    gen_helper_fincstp(cpu_env);
                    break;
                }
                break;
            case 0x0f: /* grp d9/7 */
                switch(rm) {
                case 0: /* fprem */
                    gen_helper_fprem(cpu_env);
                    break;
                case 1: /* fyl2xp1 */
                    gen_helper_fyl2xp1(cpu_env);
                    break;
                case 2: /* fsqrt */
                    gen_helper_fsqrt(cpu_env);
                    break;
                case 3: /* fsincos */
                    gen_helper_fsincos(cpu_env);
                    break;
                case 5: /* fscale */
                    gen_helper_fscale(cpu_env);
                    break;
                case 4: /* frndint */
                    gen_helper_frndint(cpu_env);
                    break;
                case 6: /* fsin */
                    gen_helper_fsin(cpu_env);
                    break;
                default:
                case 7: /* fcos */
                    gen_helper_fcos(cpu_env);
                    break;
                }
                break;
            case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
            case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
            case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                {
                    int op1;

                    op1 = op & 7;
                    if (op >= 0x20) {
                        gen_helper_fp_arith_STN_ST0(op1, opreg);
                        if (op >= 0x30)
                            gen_helper_fpop(cpu_env);
                    } else {
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                        gen_helper_fp_arith_ST0_FT0(op1);
                    }
                }
                break;
            case 0x02: /* fcom */
            case 0x22: /* fcom2, undocumented op */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcom_ST0_FT0(cpu_env);
                break;
            case 0x03: /* fcomp */
            case 0x23: /* fcomp3, undocumented op */
            case 0x32: /* fcomp5, undocumented op */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcom_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                break;
            case 0x15: /* da/5 */
                switch(rm) {
                case 1: /* fucompp */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x1c:
                switch(rm) {
                case 0: /* feni (287 only, just do nop here) */
                    break;
                case 1: /* fdisi (287 only, just do nop here) */
                    break;
                case 2: /* fclex */
                    gen_helper_fclex(cpu_env);
                    break;
                case 3: /* fninit */
                    gen_helper_fninit(cpu_env);
                    break;
                case 4: /* fsetpm (287 only, just do nop here) */
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x1d: /* fucomi */
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucomi_ST0_FT0(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
                break;
            case 0x1e: /* fcomi */
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcomi_ST0_FT0(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
                break;
            case 0x28: /* ffree sti */
                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x2a: /* fst sti */
                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x2b: /* fstp sti */
            case 0x0b: /* fstp1 sti, undocumented op */
            case 0x3a: /* fstp8 sti, undocumented op */
            case 0x3b: /* fstp9 sti, undocumented op */
                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                gen_helper_fpop(cpu_env);
                break;
            case 0x2c: /* fucom st(i) */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucom_ST0_FT0(cpu_env);
                break;
            case 0x2d: /* fucomp st(i) */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucom_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                break;
            case 0x33: /* de/3 */
                switch(rm) {
                case 1: /* fcompp */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x38: /* ffreep sti, undocumented op */
                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fpop(cpu_env);
                break;
            case 0x3c: /* df/4 */
                switch(rm) {
                case 0:
                    gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                    tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                    gen_op_mov_reg_T0(OT_WORD, R_EAX);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x3d: /* fucomip */
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucomi_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
                break;
            case 0x3e: /* fcomip */
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcomi_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
                break;
            case 0x10 ... 0x13: /* fcmovxx */
            case 0x18 ... 0x1b:
                {
                    int op1, l1;
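                    /* bits 0-1 of the opcode select the condition
                       (B, Z, BE, P via the jcc codes), bit 3 negates it;
                       the generated branch skips the fmov below when the
                       move condition does not hold */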
                    static const uint8_t fcmov_cc[8] = {
                        (JCC_B << 1),
                        (JCC_Z << 1),
                        (JCC_BE << 1),
                        (JCC_P << 1),
                    };
                    op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                    l1 = gen_new_label();
                    gen_jcc1_noeob(s, op1, l1);
                    gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_set_label(l1);
                }
                break;
            default:
                goto illegal_op;
            }
        }
        break;
    /************************/
    /* string ops */

    case 0xa4: /* movsS */
    case 0xa5:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_movs(s, ot);
        }
        break;

    case 0xaa: /* stosS */
    case 0xab:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_stos(s, ot);
        }
        break;
    case 0xac: /* lodsS */
    case 0xad:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_lods(s, ot);
        }
        break;
    case 0xae: /* scasS */
    case 0xaf:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_scas(s, ot);
        }
        break;

    case 0xa6: /* cmpsS */
    case 0xa7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_cmps(s, ot);
        }
        break;
    case 0x6c: /* insS */
    case 0x6d:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_ins(s, ot);
            if (use_icount) {
                gen_jmp(s, s->pc - s->cs_base);
            }
        }
        break;
    case 0x6e: /* outsS */
    case 0x6f:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_outs(s, ot);
            if (use_icount) {
                gen_jmp(s, s->pc - s->cs_base);
            }
        }
        break;

    /************************/
    /* port I/O */

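    /* with icount enabled, an I/O access is bracketed by gen_io_start()/
       gen_io_end() and the TB ends right after the instruction, so the
       virtual instruction counter stays exact around the access */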
    case 0xe4:
    case 0xe5:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        val = cpu_ldub_code(env, s->pc++);
        gen_op_movl_T0_im(val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
        gen_op_mov_reg_T1(ot, R_EAX);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xe6:
    case 0xe7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        val = cpu_ldub_code(env, s->pc++);
        gen_op_movl_T0_im(val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_TN_reg(ot, 1, R_EAX);

        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xec:
    case 0xed:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
        gen_op_mov_reg_T1(ot, R_EAX);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xee:
    case 0xef:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_TN_reg(ot, 1, R_EAX);

        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;

    /************************/
    /* control */
    case 0xc2: /* ret im */
        val = cpu_ldsw_code(env, s->pc);
        s->pc += 2;
        gen_pop_T0(s);
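        /* in 64-bit mode, near returns use a 64-bit operand size unless a
           16-bit operand-size override is in effect */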
        if (CODE64(s) && s->dflag)
            s->dflag = 2;
        gen_stack_update(s, val + (2 << s->dflag));
        if (s->dflag == 0)
            gen_op_andl_T0_ffff();
        gen_op_jmp_T0();
        gen_eob(s);
        break;
    case 0xc3: /* ret */
        gen_pop_T0(s);
        gen_pop_update(s);
        if (s->dflag == 0)
            gen_op_andl_T0_ffff();
        gen_op_jmp_T0();
        gen_eob(s);
        break;
    case 0xca: /* lret im */
        val = cpu_ldsw_code(env, s->pc);
        s->pc += 2;
    do_lret:
        if (s->pe && !s->vm86) {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
                                      tcg_const_i32(val));
        } else {
            gen_stack_A0(s);
            /* pop offset */
            gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_jmp_T0();
            /* pop selector */
            gen_op_addl_A0_im(2 << s->dflag);
            gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
            gen_op_movl_seg_T0_vm(R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (4 << s->dflag));
        }
        gen_eob(s);
        break;
    case 0xcb: /* lret */
        val = 0;
        goto do_lret;
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
        if (!s->pe) {
            /* real mode */
            gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
            set_cc_op(s, CC_OP_EFLAGS);
        } else if (s->vm86) {
            if (s->iopl != 3) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
                set_cc_op(s, CC_OP_EFLAGS);
            }
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            set_cc_op(s, CC_OP_EFLAGS);
        }
        gen_eob(s);
        break;
    case 0xe8: /* call im */
        {
            if (dflag)
                tval = (int32_t)insn_get(env, s, OT_LONG);
            else
                tval = (int16_t)insn_get(env, s, OT_WORD);
            next_eip = s->pc - s->cs_base;
            tval += next_eip;
            if (s->dflag == 0)
                tval &= 0xffff;
            else if(!CODE64(s))
                tval &= 0xffffffff;
            gen_movtl_T0_im(next_eip);
            gen_push_T0(s);
            gen_jmp(s, tval);
        }
        break;
    case 0x9a: /* lcall im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, OT_WORD);

            gen_op_movl_T0_im(selector);
            gen_op_movl_T1_imu(offset);
        }
        goto do_lcall;
    case 0xe9: /* jmp im */
        if (dflag)
            tval = (int32_t)insn_get(env, s, OT_LONG);
        else
            tval = (int16_t)insn_get(env, s, OT_WORD);
        tval += s->pc - s->cs_base;
        if (s->dflag == 0)
            tval &= 0xffff;
        else if(!CODE64(s))
            tval &= 0xffffffff;
        gen_jmp(s, tval);
        break;
    case 0xea: /* ljmp im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, OT_WORD);

            gen_op_movl_T0_im(selector);
            gen_op_movl_T1_imu(offset);
        }
        goto do_ljmp;
    case 0xeb: /* jmp Jb */
        tval = (int8_t)insn_get(env, s, OT_BYTE);
        tval += s->pc - s->cs_base;
        if (s->dflag == 0)
            tval &= 0xffff;
        gen_jmp(s, tval);
        break;
    case 0x70 ... 0x7f: /* jcc Jb */
        tval = (int8_t)insn_get(env, s, OT_BYTE);
        goto do_jcc;
    case 0x180 ... 0x18f: /* jcc Jv */
        if (dflag) {
            tval = (int32_t)insn_get(env, s, OT_LONG);
        } else {
            tval = (int16_t)insn_get(env, s, OT_WORD);
        }
    do_jcc:
        next_eip = s->pc - s->cs_base;
        tval += next_eip;
        if (s->dflag == 0)
            tval &= 0xffff;
        gen_jcc(s, b, tval, next_eip);
        break;

    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_setcc1(s, b, cpu_T[0]);
        gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
        break;
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_cmovcc1(env, s, ot, b, modrm, reg);
        break;

    /************************/
    /* flags */
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_helper_read_eflags(cpu_T[0], cpu_env);
            gen_push_T0(s);
        }
        break;
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_pop_T0(s);
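            /* which EFLAGS bits POPF may modify depends on privilege:
               CPL 0 may change IF and IOPL, CPL <= IOPL may change IF
               but not IOPL, and otherwise neither may be changed */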
            if (s->cpl == 0) {
                if (s->dflag) {
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK |
                                                           IF_MASK |
                                                           IOPL_MASK)));
                } else {
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK |
                                                           IF_MASK | IOPL_MASK)
                                                          & 0xffff));
                }
            } else {
                if (s->cpl <= s->iopl) {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK |
                                                               AC_MASK |
                                                               ID_MASK |
                                                               NT_MASK |
                                                               IF_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK |
                                                               AC_MASK |
                                                               ID_MASK |
                                                               NT_MASK |
                                                               IF_MASK)
                                                              & 0xffff));
                    }
                } else {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK |
                                                               ID_MASK | NT_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK |
                                                               ID_MASK | NT_MASK)
                                                              & 0xffff));
                    }
                }
            }
            gen_pop_update(s);
            set_cc_op(s, CC_OP_EFLAGS);
            /* abort translation because TF/AC flag may change */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
        gen_op_mov_reg_T0(OT_BYTE, R_AH);
        break;
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        break;
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;

/************************/
|
7007 |
/* bit operations */
|
7008 |
case 0x1ba: /* bt/bts/btr/btc Gv, im */ |
7009 |
ot = dflag + OT_WORD; |
7010 |
modrm = cpu_ldub_code(env, s->pc++); |
7011 |
op = (modrm >> 3) & 7; |
7012 |
mod = (modrm >> 6) & 3; |
7013 |
rm = (modrm & 7) | REX_B(s);
|
7014 |
if (mod != 3) { |
7015 |
s->rip_offset = 1;
|
7016 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
7017 |
gen_op_ld_T0_A0(ot + s->mem_index); |
7018 |
} else {
|
7019 |
gen_op_mov_TN_reg(ot, 0, rm);
|
7020 |
} |
7021 |
/* load shift */
|
7022 |
val = cpu_ldub_code(env, s->pc++); |
7023 |
gen_op_movl_T1_im(val); |
7024 |
if (op < 4) |
7025 |
goto illegal_op;
|
7026 |
op -= 4;
|
7027 |
goto bt_op;
|
7028 |
case 0x1a3: /* bt Gv, Ev */ |
7029 |
op = 0;
|
7030 |
goto do_btx;
|
7031 |
case 0x1ab: /* bts */ |
7032 |
op = 1;
|
7033 |
goto do_btx;
|
7034 |
case 0x1b3: /* btr */ |
7035 |
op = 2;
|
7036 |
goto do_btx;
|
7037 |
case 0x1bb: /* btc */ |
7038 |
op = 3;
|
7039 |
do_btx:
|
7040 |
ot = dflag + OT_WORD; |
7041 |
modrm = cpu_ldub_code(env, s->pc++); |
7042 |
reg = ((modrm >> 3) & 7) | rex_r; |
7043 |
mod = (modrm >> 6) & 3; |
7044 |
rm = (modrm & 7) | REX_B(s);
|
7045 |
gen_op_mov_TN_reg(OT_LONG, 1, reg);
|
7046 |
if (mod != 3) { |
7047 |
gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); |
7048 |
/* specific case: we need to add a displacement */
|
7049 |
gen_exts(ot, cpu_T[1]);
|
7050 |
tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot); |
7051 |
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot); |
7052 |
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); |
7053 |
gen_op_ld_T0_A0(ot + s->mem_index); |
7054 |
} else {
|
7055 |
gen_op_mov_TN_reg(ot, 0, rm);
|
7056 |
} |
7057 |
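    /* Common tail for bt/bts/btr/btc: op is 0..3 in that order.  T0
       holds the operand and T1 the bit offset, masked below to the
       operand width.  The tested bit is shifted into the low bit of
       cpu_cc_src (cpu_tmp4 for the modifying forms) so that the carry
       flag can be recovered from it via CC_OP_SARB + ot. */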
    bt_op:
        tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
        switch(op) {
        case 0:
            tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_cc_dst, 0);
            break;
        case 1:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        case 2:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
            tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        default:
        case 3:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
        set_cc_op(s, CC_OP_SARB + ot);
        if (op != 0) {
            if (mod != 3)
                gen_op_st_T0_A0(ot + s->mem_index);
            else
                gen_op_mov_reg_T0(ot, rm);
            tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
            tcg_gen_movi_tl(cpu_cc_dst, 0);
        }
        break;
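        /* 0F BC/BD is bsf/bsr, or tzcnt/lzcnt when the F3 (REPZ)
           prefix is present together with the matching CPUID feature
           (BMI1 for tzcnt, ABM for lzcnt).  The clz/ctz helpers count
           over a full target_ulong, so the result is biased to the
           actual operand size below. */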
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T[0]);

        /* Note that lzcnt and tzcnt are in different extensions.  */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;
            tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top.  */
                gen_helper_clz(cpu_T[0], cpu_T[0]);
                tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
            } else {
                /* For tzcnt, a zero input must return the operand size:
                   force all bits outside the operand size to 1.  */
                target_ulong mask = (target_ulong)-2 << (size - 1);
                tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
                gen_helper_ctz(cpu_T[0], cpu_T[0]);
            }
            /* For lzcnt/tzcnt, C and Z bits are defined and are
               related to the result.  */
            gen_op_update1_cc();
            set_cc_op(s, CC_OP_BMILGB + ot);
        } else {
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result.  */
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            set_cc_op(s, CC_OP_LOGICB + ot);
            if (b & 1) {
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros.  */
                gen_helper_clz(cpu_T[0], cpu_T[0]);
                tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
            } else {
                gen_helper_ctz(cpu_T[0], cpu_T[0]);
            }
            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that.  */
            tcg_gen_movi_tl(cpu_tmp0, 0);
            tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
                               cpu_regs[reg], cpu_T[0]);
        }
        gen_op_mov_reg_T0(ot, reg);
        break;
        /************************/
        /* bcd */
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_daa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_das(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aaa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aas(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = cpu_ldub_code(env, s->pc++);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
        } else {
            gen_helper_aam(cpu_env, tcg_const_i32(val));
            set_cc_op(s, CC_OP_LOGICB);
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = cpu_ldub_code(env, s->pc++);
        gen_helper_aad(cpu_env, tcg_const_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
        break;
        /************************/
        /* misc */
    case 0x90: /* nop */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
            goto illegal_op;
        }
        /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
        if (REX_B(s)) {
            goto do_xchg_reg_eax;
        }
        if (prefixes & PREFIX_REPZ) {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
        }
        break;
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fwait(cpu_env);
        }
        break;
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
        break;
    case 0xcd: /* int N */
        val = cpu_ldub_code(env, s->pc++);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
        }
        break;
    case 0xce: /* into */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
        break;
#ifdef WANT_ICEBP
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
        gen_debug(s, pc_start - s->cs_base);
#else
        /* start debug */
        tb_flush(env);
        qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
        break;
#endif
    case 0xfa: /* cli */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
                gen_helper_cli(cpu_env);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                gen_helper_cli(cpu_env);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0xfb: /* sti */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
            gen_sti:
                gen_helper_sti(cpu_env);
                /* interrupts are enabled only after the first insn
                   following sti */
                /* If several instructions disable interrupts, only the
                   first one does it */
                if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                    gen_helper_set_inhibit_irq(cpu_env);
                /* give a chance to handle pending irqs */
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                goto gen_sti;
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0x62: /* bound */
        if (CODE64(s))
            goto illegal_op;
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_op_mov_TN_reg(ot, 0, reg);
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        if (ot == OT_WORD) {
            gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
        } else {
            gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
        }
        break;
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_QUAD, 0, reg);
            tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, reg);
        } else
#endif
        {
            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, reg);
        }
        break;
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        gen_compute_eflags_c(s, cpu_T[0]);
        tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
        break;
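        /* loopnz/loopz/loop/jecxz share one skeleton: l1 is the
           branch-taken target, l3 the not-taken path used by the
           loopnz/loopz forms, and l2 joins both exits before
           gen_eob().  The low two bits of the opcode select the
           variant. */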
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        {
            int l1, l2, l3;

            tval = (int8_t)insn_get(env, s, OT_BYTE);
            next_eip = s->pc - s->cs_base;
            tval += next_eip;
            if (s->dflag == 0)
                tval &= 0xffff;

            l1 = gen_new_label();
            l2 = gen_new_label();
            l3 = gen_new_label();
            b &= 3;
            switch(b) {
            case 0: /* loopnz */
            case 1: /* loopz */
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s->aflag, l3);
                gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
                break;
            case 2: /* loop */
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s->aflag, l1);
                break;
            default:
            case 3: /* jcxz */
                gen_op_jz_ecx(s->aflag, l1);
                break;
            }

            gen_set_label(l3);
            gen_jmp_im(next_eip);
            tcg_gen_br(l2);

            gen_set_label(l1);
            gen_jmp_im(tval);
            gen_set_label(l2);
            gen_eob(s);
        }
        break;
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            if (b & 2) {
                gen_helper_rdmsr(cpu_env);
            } else {
                gen_helper_wrmsr(cpu_env);
            }
        }
        break;
    case 0x131: /* rdtsc */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        if (use_icount)
            gen_io_start();
        gen_helper_rdtsc(cpu_env);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0x133: /* rdpmc */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_rdpmc(cpu_env);
        break;
    case 0x134: /* sysenter */
        /* On Intel, SYSENTER is valid even in 64-bit mode */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysenter(cpu_env);
            gen_eob(s);
        }
        break;
    case 0x135: /* sysexit */
        /* On Intel, SYSEXIT is valid even in 64-bit mode */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
            gen_eob(s);
        }
        break;
#ifdef TARGET_X86_64
    case 0x105: /* syscall */
        /* XXX: is it usable in real mode ? */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
        gen_eob(s);
        break;
    case 0x107: /* sysret */
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
            /* condition codes are modified only in long mode */
            if (s->lma) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            gen_eob(s);
        }
        break;
#endif
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_cpuid(cpu_env);
        break;
    case 0xf4: /* hlt */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
            s->is_jmp = DISAS_TB_JUMP;
        }
        break;
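        /* 0F 00: group 6 - the reg field of the modrm byte selects
           between sldt/str/lldt/ltr/verr/verw. */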
    case 0x100:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* sldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
            ot = OT_WORD;
            if (mod == 3)
                ot += s->dflag;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lldt(cpu_env, cpu_tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
            ot = OT_WORD;
            if (mod == 3)
                ot += s->dflag;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ltr(cpu_env, cpu_tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(cpu_env, cpu_T[0]);
            } else {
                gen_helper_verw(cpu_env, cpu_T[0]);
            }
            set_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto illegal_op;
        }
        break;
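        /* 0F 01: group 7 - sgdt/sidt/lgdt/lidt/smsw/lmsw/invlpg, with
           the mod == 3 encodings reused for monitor/mwait, clac/stac,
           the SVM instructions, swapgs and rdtscp. */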
    case 0x101:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        rm = modrm & 7;
        switch(op) {
        case 0: /* sgdt */
            if (mod == 3)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
            if (!s->dflag)
                gen_op_andl_T0_im(0xffffff);
            gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            break;
        case 1:
            if (mod == 3) {
                switch (rm) {
                case 0: /* monitor */
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                        s->cpl != 0)
                        goto illegal_op;
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
#ifdef TARGET_X86_64
                    if (s->aflag == 2) {
                        gen_op_movq_A0_reg(R_EAX);
                    } else
#endif
                    {
                        gen_op_movl_A0_reg(R_EAX);
                        if (s->aflag == 0)
                            gen_op_andl_A0_ffff();
                    }
                    gen_add_A0_ds_seg(s);
                    gen_helper_monitor(cpu_env, cpu_A0);
                    break;
                case 1: /* mwait */
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                        s->cpl != 0)
                        goto illegal_op;
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
                    gen_eob(s);
                    break;
                case 2: /* clac */
                    if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
                        s->cpl != 0) {
                        goto illegal_op;
                    }
                    gen_helper_clac(cpu_env);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                    break;
                case 3: /* stac */
                    if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
                        s->cpl != 0) {
                        goto illegal_op;
                    }
                    gen_helper_stac(cpu_env);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                    break;
                default:
                    goto illegal_op;
                }
            } else { /* sidt */
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            }
            break;
        case 2: /* lgdt */
        case 3: /* lidt */
            if (mod == 3) {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                switch(rm) {
                case 0: /* VMRUN */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
                                         tcg_const_i32(s->pc - pc_start));
                        tcg_gen_exit_tb(0);
                        s->is_jmp = DISAS_TB_JUMP;
                    }
                    break;
                case 1: /* VMMCALL */
                    if (!(s->flags & HF_SVME_MASK))
                        goto illegal_op;
                    gen_helper_vmmcall(cpu_env);
                    break;
                case 2: /* VMLOAD */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                case 3: /* VMSAVE */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                case 4: /* STGI */
                    if ((!(s->flags & HF_SVME_MASK) &&
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                        !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_stgi(cpu_env);
                    }
                    break;
                case 5: /* CLGI */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_clgi(cpu_env);
                    }
                    break;
                case 6: /* SKINIT */
                    if ((!(s->flags & HF_SVME_MASK) &&
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                        !s->pe)
                        goto illegal_op;
                    gen_helper_skinit(cpu_env);
                    break;
                case 7: /* INVLPGA */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                default:
                    goto illegal_op;
                }
            } else if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start,
                                        op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T1_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                if (op == 2) {
                    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
                } else {
                    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
                }
            }
            break;
        case 4: /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
#else
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
#endif
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
            break;
        case 6: /* lmsw */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
                gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
                gen_helper_lmsw(cpu_env, cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            }
            break;
        case 7:
            if (mod != 3) { /* invlpg */
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_helper_invlpg(cpu_env, cpu_A0);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                }
            } else {
                switch (rm) {
                case 0: /* swapgs */
#ifdef TARGET_X86_64
                    if (CODE64(s)) {
                        if (s->cpl != 0) {
                            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        } else {
                            tcg_gen_ld_tl(cpu_T[0], cpu_env,
                                          offsetof(CPUX86State,segs[R_GS].base));
                            tcg_gen_ld_tl(cpu_T[1], cpu_env,
                                          offsetof(CPUX86State,kernelgsbase));
                            tcg_gen_st_tl(cpu_T[1], cpu_env,
                                          offsetof(CPUX86State,segs[R_GS].base));
                            tcg_gen_st_tl(cpu_T[0], cpu_env,
                                          offsetof(CPUX86State,kernelgsbase));
                        }
                    } else
#endif
                    {
                        goto illegal_op;
                    }
                    break;
                case 1: /* rdtscp */
                    if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
                        goto illegal_op;
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
                    if (use_icount)
                        gen_io_start();
                    gen_helper_rdtscp(cpu_env);
                    if (use_icount) {
                        gen_io_end();
                        gen_jmp(s, s->pc - s->cs_base);
                    }
                    break;
                default:
                    goto illegal_op;
                }
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;

            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_TN_reg(OT_LONG, 0, rm);
                /* sign extend */
                if (d_ot == OT_QUAD)
                    tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                if (d_ot == OT_QUAD) {
                    gen_op_lds_T0_A0(OT_LONG + s->mem_index);
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        } else
#endif
        {
            int label1;
            TCGv t0, t1, t2, a0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, cpu_A0);
            } else {
                gen_op_mov_v_reg(ot, t0, rm);
                TCGV_UNUSED(a0);
            }
            gen_op_mov_v_reg(ot, t1, reg);
            tcg_gen_andi_tl(cpu_tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(ot + s->mem_index, t0, a0);
                tcg_temp_free(a0);
            } else {
                gen_op_mov_reg_v(ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        break;
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            int label1;
            TCGv t0;
            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, cpu_T[0]);
            } else {
                gen_helper_lsl(t0, cpu_env, cpu_T[0]);
            }
            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
            gen_op_mov_reg_v(ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
            tcg_temp_free(t0);
        }
        break;
    case 0x118:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x119 ... 0x11f: /* nop (multi byte) */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = OT_QUAD;
            else
                ot = OT_LONG;
            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            switch(reg) {
            case 0:
            case 2:
            case 3:
            case 4:
            case 8:
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                if (b & 2) {
                    gen_op_mov_TN_reg(ot, 0, rm);
                    gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                                         cpu_T[0]);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
                    gen_op_mov_reg_T0(ot, rm);
                }
                break;
            default:
                goto illegal_op;
            }
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = OT_QUAD;
            else
                ot = OT_LONG;
            /* XXX: do it dynamically with CR4.DE bit */
            if (reg == 4 || reg == 5 || reg >= 8)
                goto illegal_op;
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_TN_reg(ot, 0, rm);
                gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
                gen_op_mov_reg_T0(ot, rm);
            }
        }
        break;
    case 0x106: /* clts */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* fxsave */
            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                (s->prefix & PREFIX_LOCK))
                goto illegal_op;
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
            break;
        case 1: /* fxrstor */
            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                (s->prefix & PREFIX_LOCK))
                goto illegal_op;
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fxrstor(cpu_env, cpu_A0,
                               tcg_const_i32((s->dflag == 2)));
            break;
        case 2: /* ldmxcsr */
        case 3: /* stmxcsr */
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
                mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            if (op == 2) {
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
            break;
        case 5: /* lfence */
        case 6: /* mfence */
            if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
                goto illegal_op;
            break;
        case 7: /* sfence / clflush */
            if ((modrm & 0xc7) == 0xc0) {
                /* sfence */
                /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
                if (!(s->cpuid_features & CPUID_SSE))
                    goto illegal_op;
            } else {
                /* clflush */
                if (!(s->cpuid_features & CPUID_CLFLUSH))
                    goto illegal_op;
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        /* ignore for now */
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
        gen_eob(s);
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        if (s->prefix & PREFIX_DATA)
            ot = OT_WORD;
        else if (s->dflag != 2)
            ot = OT_LONG;
        else
            ot = OT_QUAD;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
        gen_op_mov_reg_T0(ot, reg);

        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
        break;
    default:
        goto illegal_op;
    }
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
    return s->pc;
}
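/* Allocate the TCG globals that mirror CPUX86State fields: the
   condition code state (cc_op/cc_dst/cc_src/cc_src2) and one global
   per architectural register. */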
void optimize_flags_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");

#ifdef TARGET_X86_64
    cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "rax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "rcx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "rdx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "rbx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "rsp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "rbp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "rsi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "rdi");
    cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[8]), "r8");
    cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[9]), "r9");
    cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[10]), "r10");
    cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[11]), "r11");
    cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[12]), "r12");
    cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[13]), "r13");
    cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[14]), "r14");
    cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[15]), "r15");
#else
    cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "eax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "ecx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "edx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "ebx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "esp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "ebp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "esi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "edi");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(X86CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
    }
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    cpu_cc_srcT = tcg_temp_local_new();

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    for(;;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
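        /* In search_pc mode, record the guest PC, cc_op and insn
           count for each generated op so that restore_state_to_opc()
           can map an op index back to guest state after a fault. */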
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(env, dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if too long translation, stop generation too */
        if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    /* make sure the last values are filled in */
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
}
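/* Recover the guest eip and cc_op for the op at pc_pos from the
   arrays filled in by a search_pc translation pass. */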
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for(i = 0; i <= pc_pos; i++) {
            if (tcg_ctx.gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
                         tcg_ctx.gen_opc_pc[i]);
            }
        }
        qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }
#endif
    env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}