tcg/x86_64/tcg-target.c @ b03cce8e
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
const char *tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%rax",
    "%rcx",
    "%rdx",
    "%rbx",
    "%rsp",
    "%rbp",
    "%rsi",
    "%rdi",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%r15",
};

int tcg_target_reg_alloc_order[] = {
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_RAX,
    TCG_REG_R10,
    TCG_REG_R11,

    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
};

const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
    TCG_REG_R8,
    TCG_REG_R9,
};

const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RAX,
    TCG_REG_RDX
};

static uint8_t *tb_ret_addr;

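/* patch_reloc() below resolves the three relocation kinds this backend
   emits: R_X86_64_32 (32-bit zero-extended absolute), R_X86_64_32S
   (32-bit sign-extended absolute) and R_386_PC32 (32-bit PC-relative,
   measured from the start of the patched field).  Values that do not
   fit abort code generation. */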
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch(type) {
    case R_X86_64_32:
        if (value != (uint32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    case R_X86_64_32S:
        if (value != (int32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC32:
        value -= (long)code_ptr;
        if (value != (int32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    default:
        tcg_abort();
    }
}

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 6;
}

/* parse target specific constraints */
int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RCX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RSI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI);
        break;
    case 'e':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val)
        return 1;
    else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val)
        return 1;
    else
        return 0;
}

#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

#define P_EXT   0x100 /* 0x0f opcode prefix */
#define P_REXW  0x200 /* set rex.w = 1 */
#define P_REX   0x400 /* force rex usage */

static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};

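/* tcg_out_opc() emits an opcode together with its prefixes.  The REX
   prefix is 0100WRXB: W comes from the P_REXW bit folded into 'opc',
   R from bit 3 of the ModRM reg field 'r', X from bit 3 of the SIB
   index 'x' and B from bit 3 of 'rm'.  It is emitted only when one of
   those bits is set or when P_REX forces it (needed e.g. to address
   %sil/%dil as byte registers), and P_EXT adds the 0x0f two-byte
   opcode escape. */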
static inline void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
{
    int rex;
    rex = ((opc >> 6) & 0x8) | ((r >> 1) & 0x4) |
        ((x >> 2) & 2) | ((rm >> 3) & 1);
    if (rex || (opc & P_REX)) {
        tcg_out8(s, rex | 0x40);
    }
    if (opc & P_EXT)
        tcg_out8(s, 0x0f);
    tcg_out8(s, opc);
}

static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm) |
247 |
{ |
248 |
tcg_out_opc(s, opc, r, rm, 0);
|
249 |
tcg_out8(s, 0xc0 | ((r & 7) << 3) | (rm & 7)); |
250 |
} |
251 |
|
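/* tcg_out_modrm_offset() picks the smallest ModRM addressing form that
   fits: mod=00 with no displacement, mod=01 with an 8-bit displacement,
   or mod=10 with a 32-bit displacement.  %rsp as a base always needs a
   SIB byte and %rbp cannot use the mod=00 form (that encoding means
   RIP-relative), which is why those registers are special-cased below.
   A negative 'rm' selects a register-free address: RIP-relative when
   the target is within +/-2GB of the code, otherwise a 32-bit absolute
   address via a SIB byte. */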
/* rm < 0 means no register index plus (-rm - 1 immediate bytes) */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
                                        tcg_target_long offset)
{
    if (rm < 0) {
        tcg_target_long val;
        tcg_out_opc(s, opc, r, 0, 0);
        val = offset - ((tcg_target_long)s->code_ptr + 5 + (-rm - 1));
        if (val == (int32_t)val) {
            /* eip relative */
            tcg_out8(s, 0x05 | ((r & 7) << 3));
            tcg_out32(s, val);
        } else if (offset == (int32_t)offset) {
            tcg_out8(s, 0x04 | ((r & 7) << 3));
            tcg_out8(s, 0x25); /* sib */
            tcg_out32(s, offset);
        } else {
            tcg_abort();
        }
    } else if (offset == 0 && (rm & 7) != TCG_REG_RBP) {
        tcg_out_opc(s, opc, r, rm, 0);
        if ((rm & 7) == TCG_REG_RSP) {
            tcg_out8(s, 0x04 | ((r & 7) << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x00 | ((r & 7) << 3) | (rm & 7));
        }
    } else if ((int8_t)offset == offset) {
        tcg_out_opc(s, opc, r, rm, 0);
        if ((rm & 7) == TCG_REG_RSP) {
            tcg_out8(s, 0x44 | ((r & 7) << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x40 | ((r & 7) << 3) | (rm & 7));
        }
        tcg_out8(s, offset);
    } else {
        tcg_out_opc(s, opc, r, rm, 0);
        if ((rm & 7) == TCG_REG_RSP) {
            tcg_out8(s, 0x84 | ((r & 7) << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x80 | ((r & 7) << 3) | (rm & 7));
        }
        tcg_out32(s, offset);
    }
}

#if defined(CONFIG_SOFTMMU)
/* XXX: incomplete. index must be different from ESP */
static void tcg_out_modrm_offset2(TCGContext *s, int opc, int r, int rm,
                                  int index, int shift,
                                  tcg_target_long offset)
{
    int mod;
    if (rm == -1)
        tcg_abort();
    if (offset == 0 && (rm & 7) != TCG_REG_RBP) {
        mod = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40;
    } else if (offset == (int32_t)offset) {
        mod = 0x80;
    } else {
        tcg_abort();
    }
    if (index == -1) {
        tcg_out_opc(s, opc, r, rm, 0);
        if ((rm & 7) == TCG_REG_RSP) {
            tcg_out8(s, mod | ((r & 7) << 3) | 0x04);
            tcg_out8(s, 0x04 | (rm & 7));
        } else {
            tcg_out8(s, mod | ((r & 7) << 3) | (rm & 7));
        }
    } else {
        tcg_out_opc(s, opc, r, rm, index);
        tcg_out8(s, mod | ((r & 7) << 3) | 0x04);
        tcg_out8(s, (shift << 6) | ((index & 7) << 3) | (rm & 7));
    }
    if (mod == 0x40) {
        tcg_out8(s, offset);
    } else if (mod == 0x80) {
        tcg_out32(s, offset);
    }
}
#endif

static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_modrm(s, 0x8b | P_REXW, ret, arg);
}

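/* tcg_out_movi() loads an immediate with the shortest usable encoding:
   xor to zero a register, a 32-bit mov (which zero-extends) when the
   value or the type is 32 bits, a sign-extended mov imm32 otherwise,
   and only falls back to the full 10-byte movabs for 64-bit constants
   that fit neither form. */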
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, tcg_target_long arg)
{
    if (arg == 0) {
        tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret); /* xor r0,r0 */
    } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
        tcg_out_opc(s, 0xb8 + (ret & 7), 0, ret, 0);
        tcg_out32(s, arg);
    } else if (arg == (int32_t)arg) {
        tcg_out_modrm(s, 0xc7 | P_REXW, 0, ret);
        tcg_out32(s, arg);
    } else {
        tcg_out_opc(s, (0xb8 + (ret & 7)) | P_REXW, 0, ret, 0);
        tcg_out32(s, arg);
        tcg_out32(s, arg >> 32);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    if (type == TCG_TYPE_I32)
        tcg_out_modrm_offset(s, 0x8b, ret, arg1, arg2); /* movl */
    else
        tcg_out_modrm_offset(s, 0x8b | P_REXW, ret, arg1, arg2); /* movq */
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    if (type == TCG_TYPE_I32)
        tcg_out_modrm_offset(s, 0x89, arg, arg1, arg2); /* movl */
    else
        tcg_out_modrm_offset(s, 0x89 | P_REXW, arg, arg1, arg2); /* movq */
}

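/* tgen_arithi32/64() emit an ALU operation with an immediate operand,
   using the sign-extended 8-bit form (opcode 0x83) when the value fits
   in a byte and the 32-bit form (opcode 0x81) otherwise.  The 64-bit
   variant also allows a zero-extended 32-bit AND without REX.W, since
   a 32-bit operation already clears the high half of the register. */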
static inline void tgen_arithi32(TCGContext *s, int c, int r0, int32_t val)
{
    if (val == (int8_t)val) {
        tcg_out_modrm(s, 0x83, c, r0);
        tcg_out8(s, val);
    } else {
        tcg_out_modrm(s, 0x81, c, r0);
        tcg_out32(s, val);
    }
}

static inline void tgen_arithi64(TCGContext *s, int c, int r0, int64_t val)
{
    if (val == (int8_t)val) {
        tcg_out_modrm(s, 0x83 | P_REXW, c, r0);
        tcg_out8(s, val);
    } else if (val == (int32_t)val) {
        tcg_out_modrm(s, 0x81 | P_REXW, c, r0);
        tcg_out32(s, val);
    } else if (c == ARITH_AND && val == (uint32_t)val) {
        tcg_out_modrm(s, 0x81, c, r0);
        tcg_out32(s, val);
    } else {
        tcg_abort();
    }
}

void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0)
        tgen_arithi64(s, ARITH_ADD, reg, val);
}

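/* tcg_out_jxx() emits a (conditional) jump to a TCG label.  Jumps to
   already-resolved labels use the short 8-bit displacement form when
   it fits and the 32-bit near form otherwise; jumps to labels not yet
   defined always reserve a 32-bit displacement and record an
   R_386_PC32 relocation for patch_reloc() to fill in later. */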
static void tcg_out_jxx(TCGContext *s, int opc, int label_index)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1)
                tcg_out8(s, 0xeb);
            else
                tcg_out8(s, 0x70 + opc);
            tcg_out8(s, val1);
        } else {
            if (opc == -1) {
                tcg_out8(s, 0xe9);
                tcg_out32(s, val - 5);
            } else {
                tcg_out8(s, 0x0f);
                tcg_out8(s, 0x80 + opc);
                tcg_out32(s, val - 6);
            }
        }
    } else {
        if (opc == -1) {
            tcg_out8(s, 0xe9);
        } else {
            tcg_out8(s, 0x0f);
            tcg_out8(s, 0x80 + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}

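/* tcg_out_brcond() compiles a compare-and-branch.  Comparisons of a
   register against the constant 0 are turned into "test r,r" when the
   condition only depends on ZF or SF (EQ/NE/LT/GE); everything else
   goes through cmp with either an immediate or a register operand,
   followed by the jcc selected by tcg_cond_to_jcc[]. */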
static void tcg_out_brcond(TCGContext *s, int cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, int rexw)
{
    int c;
    if (const_arg2) {
        if (arg2 == 0) {
            /* use test */
            switch(cond) {
            case TCG_COND_EQ:
                c = JCC_JE;
                break;
            case TCG_COND_NE:
                c = JCC_JNE;
                break;
            case TCG_COND_LT:
                c = JCC_JS;
                break;
            case TCG_COND_GE:
                c = JCC_JNS;
                break;
            default:
                goto do_cmpi;
            }
            /* test r, r */
            tcg_out_modrm(s, 0x85 | rexw, arg1, arg1);
            tcg_out_jxx(s, c, label_index);
        } else {
        do_cmpi:
            if (rexw)
                tgen_arithi64(s, ARITH_CMP, arg1, arg2);
            else
                tgen_arithi32(s, ARITH_CMP, arg1, arg2);
            tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
        }
    } else {
        tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3) | rexw, arg2, arg1);
        tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
    }
}

#if defined(CONFIG_SOFTMMU)
extern void __ldb_mmu(void);
extern void __ldw_mmu(void);
extern void __ldl_mmu(void);
extern void __ldq_mmu(void);

extern void __stb_mmu(void);
extern void __stw_mmu(void);
extern void __stl_mmu(void);
extern void __stq_mmu(void);


static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

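/* Guest memory access, softmmu case: the generated fast path copies the
   guest address into %rdi/%rsi, derives the TLB entry offset from the
   page number, masks the address with TARGET_PAGE_MASK plus the
   access-size bits (so misaligned accesses also miss), and compares it
   against the cached tlb_table entry for this mem_index.  On a hit the
   code jumps past the helper call and adds the entry's addend to form
   the host address; on a miss it falls through to a call to the
   __ld*_mmu/__st*_mmu helpers, which take their arguments in
   %rdi/%rsi(/%rdx), which is why the 'L' constraint excludes those
   registers. */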
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap, rexw;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif

    data_reg = *args++;
    addr_reg = *args++;
    mem_index = *args;
    s_bits = opc & 3;

    r0 = TCG_REG_RDI;
    r1 = TCG_REG_RSI;

#if TARGET_LONG_BITS == 32
    rexw = 0;
#else
    rexw = P_REXW;
#endif
#if defined(CONFIG_SOFTMMU)
    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r1, addr_reg);

    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);

    tcg_out_modrm(s, 0xc1 | rexw, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tcg_out_modrm(s, 0x81 | rexw, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* lea offset(r1, env), r1 */
    tcg_out_modrm_offset2(s, 0x8d | P_REXW, r1, r1, TCG_AREG0, 0,
                          offsetof(CPUState, tlb_table[mem_index][0].addr_read));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b | rexw, r0, r1, 0);

    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* XXX: move that code to the end of the TB */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RSI, mem_index);
    tcg_out8(s, 0xe8);
    tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
              (tcg_target_long)s->code_ptr - 4);

    switch(opc) {
    case 0 | 4:
        /* movsbq */
        tcg_out_modrm(s, 0xbe | P_EXT | P_REXW, data_reg, TCG_REG_RAX);
        break;
    case 1 | 4:
        /* movswq */
        tcg_out_modrm(s, 0xbf | P_EXT | P_REXW, data_reg, TCG_REG_RAX);
        break;
    case 2 | 4:
        /* movslq */
        tcg_out_modrm(s, 0x63 | P_REXW, data_reg, TCG_REG_RAX);
        break;
    case 0:
    case 1:
    case 2:
    default:
        /* movl */
        tcg_out_modrm(s, 0x8b, data_reg, TCG_REG_RAX);
        break;
    case 3:
        tcg_out_mov(s, data_reg, TCG_REG_RAX);
        break;
    }

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, 0x03 | P_REXW, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);
        break;
    case 0 | 4:
        /* movsbX */
        tcg_out_modrm_offset(s, 0xbe | P_EXT | rexw, data_reg, r0, 0);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
        if (bswap) {
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);
        }
        break;
    case 1 | 4:
        if (bswap) {
            /* movzwl */
            tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);

            /* movswX data_reg, data_reg */
            tcg_out_modrm(s, 0xbf | P_EXT | rexw, data_reg, data_reg);
        } else {
            /* movswX */
            tcg_out_modrm_offset(s, 0xbf | P_EXT | rexw, data_reg, r0, 0);
        }
        break;
    case 2:
        /* movl (r0), data_reg */
        tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
        if (bswap) {
            /* bswap */
            tcg_out_opc(s, (0xc8 + (data_reg & 7)) | P_EXT, 0, data_reg, 0);
        }
        break;
    case 2 | 4:
        if (bswap) {
            /* movl (r0), data_reg */
            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
            /* bswap */
            tcg_out_opc(s, (0xc8 + (data_reg & 7)) | P_EXT, 0, data_reg, 0);
            /* movslq */
            tcg_out_modrm(s, 0x63 | P_REXW, data_reg, data_reg);
        } else {
            /* movslq */
            tcg_out_modrm_offset(s, 0x63 | P_REXW, data_reg, r0, 0);
        }
        break;
    case 3:
        /* movq (r0), data_reg */
        tcg_out_modrm_offset(s, 0x8b | P_REXW, data_reg, r0, 0);
        if (bswap) {
            /* bswap */
            tcg_out_opc(s, (0xc8 + (data_reg & 7)) | P_EXT | P_REXW, 0, data_reg, 0);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap, rexw;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif

    data_reg = *args++;
    addr_reg = *args++;
    mem_index = *args;

    s_bits = opc;

    r0 = TCG_REG_RDI;
    r1 = TCG_REG_RSI;

#if TARGET_LONG_BITS == 32
    rexw = 0;
#else
    rexw = P_REXW;
#endif
#if defined(CONFIG_SOFTMMU)
    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r1, addr_reg);

    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);

    tcg_out_modrm(s, 0xc1 | rexw, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tcg_out_modrm(s, 0x81 | rexw, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* lea offset(r1, env), r1 */
    tcg_out_modrm_offset2(s, 0x8d | P_REXW, r1, r1, TCG_AREG0, 0,
                          offsetof(CPUState, tlb_table[mem_index][0].addr_write));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b | rexw, r0, r1, 0);

    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* XXX: move that code to the end of the TB */
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_RSI, data_reg);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_RSI, data_reg);
        break;
    case 2:
        /* movl */
        tcg_out_modrm(s, 0x8b, TCG_REG_RSI, data_reg);
        break;
    default:
    case 3:
        tcg_out_mov(s, TCG_REG_RSI, data_reg);
        break;
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
    tcg_out8(s, 0xe8);
    tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
              (tcg_target_long)s->code_ptr - 4);

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, 0x03 | P_REXW, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    switch(opc) {
    case 0:
        /* movb */
        tcg_out_modrm_offset(s, 0x88 | P_REX, data_reg, r0, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_modrm(s, 0x8b, r1, data_reg); /* movl */
            tcg_out8(s, 0x66); /* rolw $8, %ecx */
            tcg_out_modrm(s, 0xc1, 0, r1);
            tcg_out8(s, 8);
            data_reg = r1;
        }
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 2:
        if (bswap) {
            tcg_out_modrm(s, 0x8b, r1, data_reg); /* movl */
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT, 0, r1, 0);
            data_reg = r1;
        }
        /* movl */
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 3:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT | P_REXW, 0, r1, 0);
            data_reg = r1;
        }
        /* movq */
        tcg_out_modrm_offset(s, 0x89 | P_REXW, data_reg, r0, 0);
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}

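/* tcg_out_op() is the per-opcode code generator.  Noteworthy cases:
   exit_tb loads the return value into %rax and jumps back to the
   epilogue at tb_ret_addr; goto_tb either leaves a patchable direct
   jmp (when tb_jmp_offset is in use) or jumps indirectly through the
   tb_next array; most ALU ops reuse tgen_arithi32/64 for immediate
   operands and a reg-reg ModRM form otherwise. */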
static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch(opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, args[0]);
        tcg_out8(s, 0xe9); /* jmp tb_ret_addr */
        tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, 0xe9); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            /* jmp Ev */
            tcg_out_modrm_offset(s, 0xff, 4, -1,
                                 (tcg_target_long)(s->tb_next +
                                                   args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out8(s, 0xe8);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            tcg_out_modrm(s, 0xff, 2, args[0]);
        }
        break;
    case INDEX_op_jmp:
        if (const_args[0]) {
            tcg_out8(s, 0xe9);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            tcg_out_modrm(s, 0xff, 4, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0]);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        /* movsbl */
        tcg_out_modrm_offset(s, 0xbe | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i64:
        /* movsbq */
        tcg_out_modrm_offset(s, 0xbe | P_EXT | P_REXW, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        /* movswl */
        tcg_out_modrm_offset(s, 0xbf | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i64:
        /* movswq */
        tcg_out_modrm_offset(s, 0xbf | P_EXT | P_REXW, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        /* movl */
        tcg_out_modrm_offset(s, 0x8b, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        /* movslq */
        tcg_out_modrm_offset(s, 0x63 | P_REXW, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        /* movq */
        tcg_out_modrm_offset(s, 0x8b | P_REXW, args[0], args[1], args[2]);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        /* movb */
        tcg_out_modrm_offset(s, 0x88 | P_REX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        /* movl */
        tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        /* movq */
        tcg_out_modrm_offset(s, 0x89 | P_REXW, args[0], args[1], args[2]);
        break;

    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith32;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith32;
    case INDEX_op_or_i32:
        c = ARITH_OR;
        goto gen_arith32;
    case INDEX_op_xor_i32:
        c = ARITH_XOR;
        goto gen_arith32;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
    gen_arith32:
        if (const_args[2]) {
            tgen_arithi32(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]);
        }
        break;

    case INDEX_op_sub_i64:
        c = ARITH_SUB;
        goto gen_arith64;
    case INDEX_op_and_i64:
        c = ARITH_AND;
        goto gen_arith64;
    case INDEX_op_or_i64:
        c = ARITH_OR;
        goto gen_arith64;
    case INDEX_op_xor_i64:
        c = ARITH_XOR;
        goto gen_arith64;
    case INDEX_op_add_i64:
        c = ARITH_ADD;
    gen_arith64:
        if (const_args[2]) {
            tgen_arithi64(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, 0x01 | (c << 3) | P_REXW, args[2], args[0]);
        }
        break;

    case INDEX_op_mul_i32:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, 0x6b, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, 0x69, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]);
        }
        break;
    case INDEX_op_mul_i64:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, 0x6b | P_REXW, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, 0x69 | P_REXW, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, 0xaf | P_EXT | P_REXW, args[0], args[2]);
        }
        break;
    case INDEX_op_div2_i32:
        tcg_out_modrm(s, 0xf7, 7, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_modrm(s, 0xf7, 6, args[4]);
        break;
    case INDEX_op_div2_i64:
        tcg_out_modrm(s, 0xf7 | P_REXW, 7, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_modrm(s, 0xf7 | P_REXW, 6, args[4]);
        break;

    case INDEX_op_shl_i32:
        c = SHIFT_SHL;
    gen_shift32:
        if (const_args[2]) {
            if (args[2] == 1) {
                tcg_out_modrm(s, 0xd1, c, args[0]);
            } else {
                tcg_out_modrm(s, 0xc1, c, args[0]);
                tcg_out8(s, args[2]);
            }
        } else {
            tcg_out_modrm(s, 0xd3, c, args[0]);
        }
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SHR;
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SAR;
        goto gen_shift32;

    case INDEX_op_shl_i64:
        c = SHIFT_SHL;
    gen_shift64:
        if (const_args[2]) {
            if (args[2] == 1) {
                tcg_out_modrm(s, 0xd1 | P_REXW, c, args[0]);
            } else {
                tcg_out_modrm(s, 0xc1 | P_REXW, c, args[0]);
                tcg_out8(s, args[2]);
            }
        } else {
            tcg_out_modrm(s, 0xd3 | P_REXW, c, args[0]);
        }
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SHR;
        goto gen_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SAR;
        goto gen_shift64;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], 0);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], P_REXW);
        break;

    case INDEX_op_bswap_i32:
        tcg_out_opc(s, (0xc8 + (args[0] & 7)) | P_EXT, 0, args[0], 0);
        break;
    case INDEX_op_bswap_i64:
        tcg_out_opc(s, (0xc8 + (args[0] & 7)) | P_EXT | P_REXW, 0, args[0], 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        tcg_abort();
    }
}

static int tcg_target_callee_save_regs[] = {
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    /* TCG_REG_R14, */ /* currently used for the global env, so no
                          need to save */
    TCG_REG_R15,
};

static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, (0x50 + (reg & 7)), 0, reg, 0);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, (0x58 + (reg & 7)), 0, reg, 0);
}

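/* The prologue pushes the callee-saved registers listed above, reserves
   TCG_STATIC_CALL_ARGS_SIZE bytes of stack (rounded so the frame stays
   TCG_TARGET_STACK_ALIGN aligned) and then jumps to the translated
   code whose address arrives in %rdi, the first argument register.
   tb_ret_addr marks the start of the epilogue, which exit_tb jumps
   back to. */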
/* Generate global QEMU prologue and epilogue code */
void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size, push_size, stack_addend;

    /* TB prologue */
    /* save all callee saved registers */
    for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }
    /* reserve some stack space */
    push_size = 8 + ARRAY_SIZE(tcg_target_callee_save_regs) * 8;
    frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    stack_addend = frame_size - push_size;
    tcg_out_addi(s, TCG_REG_RSP, -stack_addend);

    tcg_out_modrm(s, 0xff, 4, TCG_REG_RDI); /* jmp *%rdi */

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    tcg_out_addi(s, TCG_REG_RSP, stack_addend);
    for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out8(s, 0xc3); /* ret */
}

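/* Operand constraint strings used below (standard TCG syntax plus the
   target letters parsed in target_parse_constraint above): "r" is any
   of the 16 integer registers, single letters such as "a"/"c"/"d" pin
   an operand to %rax/%rcx/%rdx, "0"/"1" alias an output to the
   corresponding input, "e" accepts a sign-extended 32-bit immediate,
   "Z" a zero-extended one, and "L" is "r" minus %rsi/%rdi, which the
   qemu_ld/st fast path and its helper calls clobber. */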
static const TCGTargetOpDef x86_64_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } }, /* XXX: might need a specific constant constraint */
    { INDEX_op_jmp, { "ri" } }, /* XXX: might need a specific constant constraint */
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "0", "re" } },
    { INDEX_op_mul_i64, { "r", "0", "re" } },
    { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "0", "re" } },
    { INDEX_op_and_i64, { "r", "0", "reZ" } },
    { INDEX_op_or_i64, { "r", "0", "re" } },
    { INDEX_op_xor_i64, { "r", "0", "re" } },

    { INDEX_op_shl_i64, { "r", "0", "ci" } },
    { INDEX_op_shr_i64, { "r", "0", "ci" } },
    { INDEX_op_sar_i64, { "r", "0", "ci" } },

    { INDEX_op_brcond_i64, { "r", "re" } },

    { INDEX_op_bswap_i32, { "r", "0" } },
    { INDEX_op_bswap_i64, { "r", "0" } },

    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },

    { -1 },
};

void tcg_target_init(TCGContext *s)
{
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_RDI) |
                     (1 << TCG_REG_RSI) |
                     (1 << TCG_REG_RDX) |
                     (1 << TCG_REG_RCX) |
                     (1 << TCG_REG_R8) |
                     (1 << TCG_REG_R9) |
                     (1 << TCG_REG_RAX) |
                     (1 << TCG_REG_R10) |
                     (1 << TCG_REG_R11));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RSP);

    tcg_add_target_add_op_defs(x86_64_op_defs);
}