/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
const char *tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%eax",
    "%ecx",
    "%edx",
    "%ebx",
    "%esp",
    "%ebp",
    "%esi",
    "%edi",
};

int tcg_target_reg_alloc_order[] = {
    TCG_REG_EAX,
    TCG_REG_EDX,
    TCG_REG_ECX,
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ESP,
};

const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };

static uint8_t *tb_ret_addr;
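
/* Patch a relocation recorded with tcg_out_reloc().  R_386_32 stores the
   absolute value, R_386_PC32 a displacement relative to the patched field
   (callers pass an addend of -4 so the stored value is relative to the end
   of the 32-bit immediate, as the CPU expects). */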
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch(type) {
    case R_386_32:
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC32:
        *(uint32_t *)code_ptr = value - (long)code_ptr;
        break;
    default:
        tcg_abort();
    }
}
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    flags &= TCG_CALL_TYPE_MASK;
    switch(flags) {
    case TCG_CALL_TYPE_STD:
        return 0;
    case TCG_CALL_TYPE_REGPARM_1:
    case TCG_CALL_TYPE_REGPARM_2:
    case TCG_CALL_TYPE_REGPARM:
        return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
    default:
        tcg_abort();
    }
}
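
/* Constraint letters used below: 'a'..'d', 'S' and 'D' pin an operand to a
   single register; 'q' allows the four byte-addressable registers
   (%eax, %ecx, %edx, %ebx); 'r' allows any of the eight GPRs; 'L' is 'r'
   without %eax/%edx, which the qemu_ld/st code clobbers. */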
/* parse target specific constraints */
int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        break;

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else
        return 0;
}
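
/* ARITH_* are the ModRM reg-field extensions of the 0x81/0x83 group-1
   arithmetic opcodes, SHIFT_* those of the 0xc1/0xd1/0xd3 shift group,
   and JCC_* the condition-code nibbles used with the 0x70+cc (rel8) and
   0x0f 0x80+cc (rel32) conditional jumps. */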
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

#define P_EXT   0x100 /* 0x0f opcode prefix */

static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
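
/* Low-level emitters.  tcg_out_opc() prepends the 0x0f escape byte when
   P_EXT is set; tcg_out_modrm() emits a register-direct ModRM byte
   (mod = 11); tcg_out_modrm_offset() chooses the shortest memory form
   (no displacement, disp8 or disp32), adds the mandatory SIB byte (0x24)
   when the base register is %esp, and uses the absolute disp32 form when
   rm == -1. */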
static inline void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_EXT)
        tcg_out8(s, 0x0f);
    tcg_out8(s, opc);
}

static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc);
    tcg_out8(s, 0xc0 | (r << 3) | rm);
}

/* rm == -1 means no register index */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
                                        int32_t offset)
{
    tcg_out_opc(s, opc);
    if (rm == -1) {
        tcg_out8(s, 0x05 | (r << 3));
        tcg_out32(s, offset);
    } else if (offset == 0 && rm != TCG_REG_EBP) {
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x04 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x00 | (r << 3) | rm);
        }
    } else if ((int8_t)offset == offset) {
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x44 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x40 | (r << 3) | rm);
        }
        tcg_out8(s, offset);
    } else {
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x84 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x80 | (r << 3) | rm);
        }
        tcg_out32(s, offset);
    }
}
static inline void tcg_out_mov(TCGContext *s, int ret, int arg) |
244 |
{ |
245 |
if (arg != ret)
|
246 |
tcg_out_modrm(s, 0x8b, ret, arg);
|
247 |
} |
248 |
|
249 |
static inline void tcg_out_movi(TCGContext *s, TCGType type, |
250 |
int ret, int32_t arg)
|
251 |
{ |
252 |
if (arg == 0) { |
253 |
/* xor r0,r0 */
|
254 |
tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret); |
255 |
} else {
|
256 |
tcg_out8(s, 0xb8 + ret);
|
257 |
tcg_out32(s, arg); |
258 |
} |
259 |
} |
260 |
|
261 |
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret, |
262 |
int arg1, tcg_target_long arg2)
|
263 |
{ |
264 |
/* movl */
|
265 |
tcg_out_modrm_offset(s, 0x8b, ret, arg1, arg2);
|
266 |
} |
267 |
|
268 |
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg, |
269 |
int arg1, tcg_target_long arg2)
|
270 |
{ |
271 |
/* movl */
|
272 |
tcg_out_modrm_offset(s, 0x89, arg, arg1, arg2);
|
273 |
} |
274 |
|
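
/* Group-1 ALU operation 'c' with an immediate: use the sign-extended imm8
   form (0x83) when the value fits in a byte, the imm32 form (0x81)
   otherwise. */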
static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val)
{
    if (val == (int8_t)val) {
        tcg_out_modrm(s, 0x83, c, r0);
        tcg_out8(s, val);
    } else {
        tcg_out_modrm(s, 0x81, c, r0);
        tcg_out32(s, val);
    }
}

void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0)
        tgen_arithi(s, ARITH_ADD, reg, val);
}
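
/* Emit a jump (opc == JCC_JMP) or conditional jump to a TCG label.  Known
   labels use the short rel8 encoding when the displacement fits and the
   rel32 encoding otherwise; forward references always emit the rel32 form
   and are fixed up later via an R_386_PC32 relocation. */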
static void tcg_out_jxx(TCGContext *s, int opc, int label_index)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1)
                tcg_out8(s, 0xeb);
            else
                tcg_out8(s, 0x70 + opc);
            tcg_out8(s, val1);
        } else {
            if (opc == -1) {
                tcg_out8(s, 0xe9);
                tcg_out32(s, val - 5);
            } else {
                tcg_out8(s, 0x0f);
                tcg_out8(s, 0x80 + opc);
                tcg_out32(s, val - 6);
            }
        }
    } else {
        if (opc == -1) {
            tcg_out8(s, 0xe9);
        } else {
            tcg_out8(s, 0x0f);
            tcg_out8(s, 0x80 + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
static void tcg_out_brcond(TCGContext *s, int cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index)
{
    int c;
    if (const_arg2) {
        if (arg2 == 0) {
            /* use test */
            switch(cond) {
            case TCG_COND_EQ:
                c = JCC_JE;
                break;
            case TCG_COND_NE:
                c = JCC_JNE;
                break;
            case TCG_COND_LT:
                c = JCC_JS;
                break;
            case TCG_COND_GE:
                c = JCC_JNS;
                break;
            default:
                goto do_cmpi;
            }
            /* test r, r */
            tcg_out_modrm(s, 0x85, arg1, arg1);
            tcg_out_jxx(s, c, label_index);
        } else {
        do_cmpi:
            tgen_arithi(s, ARITH_CMP, arg1, arg2);
            tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
        }
    } else {
        tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3), arg2, arg1);
        tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
    }
}
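
/* Lower a double-word (64-bit) conditional branch into 32-bit compares:
   the high words decide the branch in most cases, and only when they are
   equal is the low-word compare emitted; label_next is the local
   fall-through label used to skip that low-word test. */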
/* XXX: we implement it at the target level to avoid having to
   handle cross-basic-block temporaries */
static void tcg_out_brcond2(TCGContext *s,
                            const TCGArg *args, const int *const_args)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], label_next);
        tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3], args[5]);
        break;
    case TCG_COND_NE:
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], args[5]);
        break;
    case TCG_COND_LT:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
        tcg_out_brcond(s, TCG_COND_LT, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LE:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
        tcg_out_brcond(s, TCG_COND_LE, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GT:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
        tcg_out_brcond(s, TCG_COND_GT, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GE:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
        tcg_out_brcond(s, TCG_COND_GE, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}
#if defined(CONFIG_SOFTMMU)
extern void __ldb_mmu(void);
extern void __ldw_mmu(void);
extern void __ldl_mmu(void);
extern void __ldq_mmu(void);

extern void __stb_mmu(void);
extern void __stw_mmu(void);
extern void __stl_mmu(void);
extern void __stq_mmu(void);

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif
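
/* Softmmu fast path shared by qemu_ld/qemu_st below: %edx is turned into a
   byte offset into env->tlb_table (shift by TARGET_PAGE_BITS -
   CPU_TLB_ENTRY_BITS, then mask), %eax is masked to the page-aligned
   address (plus the low access-size bits so unaligned accesses take the
   slow path), and the result is compared against the entry's
   addr_read/addr_write.  On a hit the host address is the guest address
   plus the entry's addend; on a miss the __ld*_mmu/__st*_mmu helpers above
   are called.  env lives in %ebp (see tcg_target_callee_save_regs). */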
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed-register globals are less
   common. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc & 3;

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);

    tcg_out_mov(s, r0, addr_reg);

    tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* jne label3 */
    tcg_out8(s, 0x70 + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* XXX: move that code to the end of the TB */
#if TARGET_LONG_BITS == 32
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
#else
    tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
#endif
    tcg_out8(s, 0xe8);
    tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
              (tcg_target_long)s->code_ptr - 4);

    switch(opc) {
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm(s, 0xbe | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm(s, 0xbf | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 0:
    case 1:
    case 2:
    default:
        tcg_out_mov(s, data_reg, TCG_REG_EAX);
        break;
    case 3:
        if (data_reg == TCG_REG_EDX) {
            tcg_out_opc(s, 0x90 + TCG_REG_EDX); /* xchg %edx, %eax */
            tcg_out_mov(s, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, data_reg2, TCG_REG_EDX);
        }
        break;
    }

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);
        break;
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, 0);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
        if (bswap) {
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);
        }
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, 0);
        if (bswap) {
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);

            /* movswl data_reg, data_reg */
            tcg_out_modrm(s, 0xbf | P_EXT, data_reg, data_reg);
        }
        break;
    case 2:
        /* movl (r0), data_reg */
        tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
        if (bswap) {
            /* bswap */
            tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);
        }
        break;
    case 3:
        /* XXX: could be nicer */
        if (r0 == data_reg) {
            r1 = TCG_REG_EDX;
            if (r1 == data_reg)
                r1 = TCG_REG_EAX;
            tcg_out_mov(s, r1, r0);
            r0 = r1;
        }
        if (!bswap) {
            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
            tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 4);
        } else {
            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 4);
            tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);

            tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 0);
            /* bswap */
            tcg_out_opc(s, (0xc8 + data_reg2) | P_EXT);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
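
/* Store counterpart of tcg_out_qemu_ld.  The slow-path helper arguments
   that do not fit in %eax/%edx/%ecx are pushed on the stack, which is why
   %esp is readjusted with tcg_out_addi() after the call. */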
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;

    s_bits = opc;

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);

    tcg_out_mov(s, r0, addr_reg);

    tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* jne label3 */
    tcg_out8(s, 0x70 + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* XXX: move that code to the end of the TB */
#if TARGET_LONG_BITS == 32
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, data_reg);
        tcg_out_mov(s, TCG_REG_ECX, data_reg2);
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 4);
    } else {
        switch(opc) {
        case 0:
            /* movzbl */
            tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_EDX, data_reg);
            break;
        case 1:
            /* movzwl */
            tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_EDX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_EDX, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
    }
#else
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out_opc(s, 0x50 + data_reg2); /* push */
        tcg_out_opc(s, 0x50 + data_reg); /* push */
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 12);
    } else {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        switch(opc) {
        case 0:
            /* movzbl */
            tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_ECX, data_reg);
            break;
        case 1:
            /* movzwl */
            tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_ECX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_ECX, data_reg);
            break;
        }
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 4);
    }
#endif

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    switch(opc) {
    case 0:
        /* movb */
        tcg_out_modrm_offset(s, 0x88, data_reg, r0, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out8(s, 0x66); /* rolw $8, %ecx */
            tcg_out_modrm(s, 0xc1, 0, r1);
            tcg_out8(s, 8);
            data_reg = r1;
        }
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            data_reg = r1;
        }
        /* movl */
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 3:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg2);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            tcg_out_modrm_offset(s, 0x89, r1, r0, 0);
            tcg_out_mov(s, r1, data_reg);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            tcg_out_modrm_offset(s, 0x89, r1, r0, 4);
        } else {
            tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
            tcg_out_modrm_offset(s, 0x89, data_reg2, r0, 4);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
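
/* Central dispatcher: translate one TCG opcode into host code.  The
   two-operand ALU ops rely on the "0" matching constraints declared in
   x86_op_defs below, so args[0] is both the destination and the first
   source operand. */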
static inline void tcg_out_op(TCGContext *s, int opc,
                              const TCGArg *args, const int *const_args)
{
    int c;

    switch(opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
        tcg_out8(s, 0xe9); /* jmp tb_ret_addr */
        tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, 0xe9); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            /* jmp Ev */
            tcg_out_modrm_offset(s, 0xff, 4, -1,
                                 (tcg_target_long)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out8(s, 0xe8);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            tcg_out_modrm(s, 0xff, 2, args[0]);
        }
        break;
    case INDEX_op_jmp:
        if (const_args[0]) {
            tcg_out8(s, 0xe9);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            tcg_out_modrm(s, 0xff, 4, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0]);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ld8u_i32:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        /* movsbl */
        tcg_out_modrm_offset(s, 0xbe | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        /* movswl */
        tcg_out_modrm_offset(s, 0xbf | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        /* movl */
        tcg_out_modrm_offset(s, 0x8b, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        /* movb */
        tcg_out_modrm_offset(s, 0x88, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        /* movl */
        tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
        break;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_OR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
    gen_arith:
        if (const_args[2]) {
            tgen_arithi(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]);
        }
        break;
    case INDEX_op_mul_i32:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, 0x6b, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, 0x69, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]);
        }
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_modrm(s, 0xf7, 4, args[3]);
        break;
    case INDEX_op_div2_i32:
        tcg_out_modrm(s, 0xf7, 7, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_modrm(s, 0xf7, 6, args[4]);
        break;
    case INDEX_op_shl_i32:
        c = SHIFT_SHL;
    gen_shift32:
        if (const_args[2]) {
            if (args[2] == 1) {
                tcg_out_modrm(s, 0xd1, c, args[0]);
            } else {
                tcg_out_modrm(s, 0xc1, c, args[0]);
                tcg_out8(s, args[2]);
            }
        } else {
            tcg_out_modrm(s, 0xd3, c, args[0]);
        }
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SHR;
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SAR;
        goto gen_shift32;

    case INDEX_op_add2_i32:
        if (const_args[4])
            tgen_arithi(s, ARITH_ADD, args[0], args[4]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_ADD << 3), args[4], args[0]);
        if (const_args[5])
            tgen_arithi(s, ARITH_ADC, args[1], args[5]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_ADC << 3), args[5], args[1]);
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4])
            tgen_arithi(s, ARITH_SUB, args[0], args[4]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), args[4], args[0]);
        if (const_args[5])
            tgen_arithi(s, ARITH_SBB, args[1], args[5]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_SBB << 3), args[5], args[1]);
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        tcg_abort();
    }
}
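
/* Operand constraints, one string per operand: "r" any GPR, "q" a
   byte-addressable GPR, "a"/"c"/"d" fixed registers, "L" the qemu_ld/st
   address constraint, "i" an immediate, and a digit reuses the register
   allocated to the input operand with that index. */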
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "q", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "0", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 },
};
static int tcg_target_callee_save_regs[] = {
    /* TCG_REG_EBP, */ /* currently used for the global env, so no
                          need to save */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
};

static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, 0x50 + reg);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, 0x58 + reg);
}
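
/* The prologue saves the callee-saved registers (%ebp is skipped because
   it permanently holds the env pointer), reserves the aligned static
   call-argument area on the stack and jumps to the translation block whose
   address the caller passed in %eax.  The epilogue at tb_ret_addr undoes
   the stack adjustment, restores the registers and returns with the
   exit_tb value still in %eax. */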
/* Generate global QEMU prologue and epilogue code */
void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size, push_size, stack_addend;

    /* TB prologue */
    /* save all callee saved registers */
    for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }
    /* reserve some stack space */
    push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
    frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    stack_addend = frame_size - push_size;
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);

    tcg_out_modrm(s, 0xff, 4, TCG_REG_EAX); /* jmp *%eax */

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    tcg_out_addi(s, TCG_REG_ESP, stack_addend);
    for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out8(s, 0xc3); /* ret */
}
void tcg_target_init(TCGContext *s)
{
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_EAX) |
                     (1 << TCG_REG_EDX) |
                     (1 << TCG_REG_ECX));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);

    tcg_add_target_add_op_defs(x86_op_defs);
}