Revision 0573fbfc target-i386/op.c

b/target-i386/op.c
513 513
} UREG64;
514 514
#endif
515 515

  
516
#ifdef TARGET_X86_64
517

  
518 516
#define PARAMQ1 \
519 517
({\
520 518
    UREG64 __p;\
......
523 521
    __p.q;\
524 522
})
525 523

  
524
#ifdef TARGET_X86_64
525

  
526 526
void OPPROTO op_movq_T0_im64(void)
527 527
{
528 528
    T0 = PARAMQ1;
......
1242 1242
    helper_ltr_T0();
1243 1243
}
1244 1244

  
1245
/* CR registers access */
1245
/* CR registers access. */
1246 1246
/* Move T0 into control register CR<n>; the register index is the
   micro-op immediate PARAM1.  All work is delegated to
   helper_movl_crN_T0() (defined elsewhere in the target). */
void OPPROTO op_movl_crN_T0(void)
1247 1247
{
1248 1248
    helper_movl_crN_T0(PARAM1);
1249 1249
}
1250 1250

  
1251
/* These pseudo-opcodes check for SVM intercepts. */
1252
/* SVM pseudo-opcode: check whether the event identified by the 64-bit
   immediate PARAMQ1 is intercepted, via svm_check_intercept(). */
void OPPROTO op_svm_check_intercept(void)
1253
{
1254
    /* NOTE(review): this dummy AND presumably exists only so that the
       code generator keeps both PARAM relocations from which PARAMQ1
       is assembled -- confirm against dyngen behaviour. */
    A0 = PARAM1 & PARAM2;
1255
    svm_check_intercept(PARAMQ1);
1256
}
1257

  
1258
/* SVM pseudo-opcode: like op_svm_check_intercept, but also passes T1
   as an extra parameter to svm_check_intercept_param(). */
void OPPROTO op_svm_check_intercept_param(void)
1259
{
1260
    /* NOTE(review): dummy AND, presumably to keep both PARAM
       relocations that make up PARAMQ1 -- confirm. */
    A0 = PARAM1 & PARAM2;
1261
    svm_check_intercept_param(PARAMQ1, T1);
1262
}
1263

  
1264
/* SVM pseudo-opcode: force a #VMEXIT with exit code PARAMQ1 and extra
   info T1, via vmexit(). */
void OPPROTO op_svm_vmexit(void)
1265
{
1266
    /* NOTE(review): dummy AND, presumably to keep both PARAM
       relocations that make up PARAMQ1 -- confirm. */
    A0 = PARAM1 & PARAM2;
1267
    vmexit(PARAMQ1, T1);
1268
}
1269

  
1270
/* Materialize the lazily-evaluated condition codes: compute the full
   flags for the current CC_OP and store the result into CC_SRC. */
void OPPROTO op_geneflags(void)
1271
{
1272
    CC_SRC = cc_table[CC_OP].compute_all();
1273
}
1274

  
1275
/* This pseudo-opcode checks for IO intercepts. */
1276
#if !defined(CONFIG_USER_ONLY)
1277
/* SVM pseudo-opcode for IN/OUT intercept checks.  Records the next EIP
   in the VMCB's exit_info_2 field, then checks the IOIO intercept with
   the access-type bits (PARAMQ1) and port number (T0) packed into the
   exit-info word. */
void OPPROTO op_svm_check_intercept_io(void)
1278
{
1279
    /* NOTE(review): dummy AND, presumably to keep both PARAM
       relocations that make up PARAMQ1 -- confirm. */
    A0 = PARAM1 & PARAM2;
1280
    /* PARAMQ1 = TYPE (0 = OUT, 1 = IN; 4 = STRING; 8 = REP)
1281
       T0      = PORT
1282
       T1      = next eip */
1283
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), T1);
1284
    /* ASIZE does not appear on real hw */
1285
    svm_check_intercept_param(SVM_EXIT_IOIO,
1286
                              (PARAMQ1 & ~SVM_IOIO_ASIZE_MASK) |
1287
                              ((T0 & 0xffff) << 16));
1288
}
1289
#endif
1290

  
1251 1291
#if !defined(CONFIG_USER_ONLY)
1252 1292
void OPPROTO op_movtl_T0_cr8(void)
1253 1293
{
......
2452 2492

  
2453 2493
#define SHIFT 1
2454 2494
#include "ops_sse.h"
2495

  
2496
/* Secure Virtual Machine ops */
2497

  
2498
/* VMRUN: enter the guest described by the VMCB addressed by EAX
   (delegated to helper_vmrun(), defined elsewhere). */
void OPPROTO op_vmrun(void)
2499
{
2500
    helper_vmrun(EAX);
2501
}
2502

  
2503
/* VMMCALL: guest-to-hypervisor call (delegated to helper_vmmcall()). */
void OPPROTO op_vmmcall(void)
2504
{
2505
    helper_vmmcall();
2506
}
2507

  
2508
/* VMLOAD: load additional guest state from the VMCB addressed by EAX
   (delegated to helper_vmload()). */
void OPPROTO op_vmload(void)
2509
{
2510
    helper_vmload(EAX);
2511
}
2512

  
2513
/* VMSAVE: store additional guest state into the VMCB addressed by EAX
   (delegated to helper_vmsave()). */
void OPPROTO op_vmsave(void)
2514
{
2515
    helper_vmsave(EAX);
2516
}
2517

  
2518
/* STGI: set the global interrupt flag (delegated to helper_stgi()). */
void OPPROTO op_stgi(void)
2519
{
2520
    helper_stgi();
2521
}
2522

  
2523
/* CLGI: clear the global interrupt flag (delegated to helper_clgi()). */
void OPPROTO op_clgi(void)
2524
{
2525
    helper_clgi();
2526
}
2527

  
2528
/* SKINIT: secure init/startup of a trusted loader (delegated to
   helper_skinit()). */
void OPPROTO op_skinit(void)
2529
{
2530
    helper_skinit();
2531
}
2532

  
2533
/* INVLPGA: invalidate a guest TLB entry (delegated to
   helper_invlpga(); operands, if any, are read by the helper). */
void OPPROTO op_invlpga(void)
2534
{
2535
    helper_invlpga();
2536
}

Also available in: Unified diff