Revision f1665b21
b/kvm-all.c | ||
---|---|---|
71 | 71 |
#endif |
72 | 72 |
int irqchip_in_kernel; |
73 | 73 |
int pit_in_kernel; |
74 |
int xsave, xcrs; |
|
74 | 75 |
}; |
75 | 76 |
|
76 | 77 |
static KVMState *kvm_state; |
... | ... | |
686 | 687 |
s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); |
687 | 688 |
#endif |
688 | 689 |
|
690 |
s->xsave = 0; |
|
691 |
#ifdef KVM_CAP_XSAVE |
|
692 |
s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE); |
|
693 |
#endif |
|
694 |
|
|
695 |
s->xcrs = 0; |
|
696 |
#ifdef KVM_CAP_XCRS |
|
697 |
s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS); |
|
698 |
#endif |
|
699 |
|
|
689 | 700 |
ret = kvm_arch_init(s, smp_cpus); |
690 | 701 |
if (ret < 0) |
691 | 702 |
goto err; |
... | ... | |
1014 | 1025 |
return kvm_state->debugregs; |
1015 | 1026 |
} |
1016 | 1027 |
|
1028 |
int kvm_has_xsave(void) |
|
1029 |
{ |
|
1030 |
return kvm_state->xsave; |
|
1031 |
} |
|
1032 |
|
|
1033 |
int kvm_has_xcrs(void) |
|
1034 |
{ |
|
1035 |
return kvm_state->xcrs; |
|
1036 |
} |
|
1037 |
|
|
1017 | 1038 |
void kvm_setup_guest_memory(void *start, size_t size) |
1018 | 1039 |
{ |
1019 | 1040 |
if (!kvm_has_sync_mmu()) { |
b/kvm.h | ||
---|---|---|
40 | 40 |
int kvm_has_vcpu_events(void); |
41 | 41 |
int kvm_has_robust_singlestep(void); |
42 | 42 |
int kvm_has_debugregs(void); |
43 |
int kvm_has_xsave(void); |
|
44 |
int kvm_has_xcrs(void); |
|
43 | 45 |
|
44 | 46 |
#ifdef NEED_CPU_H |
45 | 47 |
int kvm_init_vcpu(CPUState *env); |
b/target-i386/cpu.h | ||
---|---|---|
718 | 718 |
uint16_t fpus_vmstate; |
719 | 719 |
uint16_t fptag_vmstate; |
720 | 720 |
uint16_t fpregs_format_vmstate; |
721 |
|
|
722 |
uint64_t xstate_bv; |
|
723 |
XMMReg ymmh_regs[CPU_NB_REGS]; |
|
724 |
|
|
725 |
uint64_t xcr0; |
|
721 | 726 |
} CPUX86State; |
722 | 727 |
|
723 | 728 |
CPUX86State *cpu_x86_init(const char *cpu_model); |
... | ... | |
899 | 904 |
#define cpu_list_id x86_cpu_list |
900 | 905 |
#define cpudef_setup x86_cpudef_setup |
901 | 906 |
|
902 |
#define CPU_SAVE_VERSION 11
|
|
907 |
#define CPU_SAVE_VERSION 12
|
|
903 | 908 |
|
904 | 909 |
/* MMU modes definitions */ |
905 | 910 |
#define MMU_MODE0_SUFFIX _kernel |
b/target-i386/kvm.c | ||
---|---|---|
497 | 497 |
return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu); |
498 | 498 |
} |
499 | 499 |
|
500 |
#ifdef KVM_CAP_XSAVE |
|
501 |
#define XSAVE_CWD_RIP 2 |
|
502 |
#define XSAVE_CWD_RDP 4 |
|
503 |
#define XSAVE_MXCSR 6 |
|
504 |
#define XSAVE_ST_SPACE 8 |
|
505 |
#define XSAVE_XMM_SPACE 40 |
|
506 |
#define XSAVE_XSTATE_BV 128 |
|
507 |
#define XSAVE_YMMH_SPACE 144 |
|
508 |
#endif |
|
509 |
|
|
510 |
/*
 * Write the guest's FPU/SSE/AVX state to the kernel via KVM_SET_XSAVE.
 *
 * Falls back to kvm_put_fpu() when XSAVE support is missing, either at
 * compile time (no KVM_CAP_XSAVE) or at run time (kvm_has_xsave() == 0).
 * Returns 0 on success or a negative errno from the ioctl.
 */
static int kvm_put_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    int i, ret;
    struct kvm_xsave* xsave;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave())
        return kvm_put_fpu(env);

    /* The kernel expects a page-aligned buffer. */
    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    memset(xsave, 0, sizeof(struct kvm_xsave));
    cwd = swd = twd = fop = 0;
    /* Rebuild the x87 status word: fpstt occupies SWD bits 11-13. */
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    /* Abridged tag word: one bit per register, inverse of fptags[]. */
    for (i = 0; i < 8; ++i)
        twd |= (!env->fptags[i]) << i;
    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
    xsave->region[1] = (uint32_t)(fop << 16) + twd;
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    ret = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    /* Fix: the buffer was previously leaked on every call. */
    qemu_free(xsave);
    return ret;
#else
    return kvm_put_fpu(env);
#endif
}
|
543 |
|
|
544 |
/*
 * Push the guest's extended control registers into the kernel via
 * KVM_SET_XCRS.  Only XCR0 is transferred for now.  A no-op returning
 * 0 when the host lacks XCRS support.
 */
static int kvm_put_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;          /* index 0 selects XCR0 */
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
#else
    return 0;
#endif
}
|
561 |
|
|
500 | 562 |
static int kvm_put_sregs(CPUState *env) |
501 | 563 |
{ |
502 | 564 |
struct kvm_sregs sregs; |
... | ... | |
614 | 676 |
return 0; |
615 | 677 |
} |
616 | 678 |
|
679 |
/*
 * Read the guest's FPU/SSE/AVX state from the kernel via KVM_GET_XSAVE
 * and unpack it into @env.
 *
 * Falls back to kvm_get_fpu() when XSAVE support is missing, either at
 * compile time (no KVM_CAP_XSAVE) or at run time (kvm_has_xsave() == 0).
 * Returns 0 on success or a negative errno from the ioctl.
 */
static int kvm_get_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    struct kvm_xsave* xsave;
    int ret, i;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave())
        return kvm_get_fpu(env);

    /* The kernel expects a page-aligned buffer. */
    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        /* Fix: the buffer was previously leaked on ioctl failure. */
        qemu_free(xsave);
        return ret;
    }

    cwd = (uint16_t)xsave->region[0];
    swd = (uint16_t)(xsave->region[0] >> 16);
    twd = (uint16_t)xsave->region[1];
    fop = (uint16_t)(xsave->region[1] >> 16);
    /* fpstt occupies SWD bits 11-13. */
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    /* Abridged tag word: one bit per register, inverse of fptags[]. */
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((twd >> i) & 1);
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    /* Fix: the buffer was previously leaked on the success path too. */
    qemu_free(xsave);
    return 0;
#else
    return kvm_get_fpu(env);
#endif
}
|
716 |
|
|
717 |
/*
 * Read the guest's extended control registers from the kernel via
 * KVM_GET_XCRS and pick out XCR0.  A no-op returning 0 when the host
 * lacks XCRS support.  Returns 0 on success or a negative errno from
 * the ioctl.
 */
static int kvm_get_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        /* Fix: scan entry i, not entry 0 — the old code only worked by
         * accident when XCR0 happened to be the first entry. */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
#else
    return 0;
#endif
}
|
741 |
|
|
617 | 742 |
static int kvm_get_sregs(CPUState *env) |
618 | 743 |
{ |
619 | 744 |
struct kvm_sregs sregs; |
... | ... | |
958 | 1083 |
if (ret < 0) |
959 | 1084 |
return ret; |
960 | 1085 |
|
961 |
ret = kvm_put_fpu(env); |
|
1086 |
ret = kvm_put_xsave(env); |
|
1087 |
if (ret < 0) |
|
1088 |
return ret; |
|
1089 |
|
|
1090 |
ret = kvm_put_xcrs(env); |
|
962 | 1091 |
if (ret < 0) |
963 | 1092 |
return ret; |
964 | 1093 |
|
... | ... | |
1002 | 1131 |
if (ret < 0) |
1003 | 1132 |
return ret; |
1004 | 1133 |
|
1005 |
ret = kvm_get_fpu(env); |
|
1134 |
ret = kvm_get_xsave(env); |
|
1135 |
if (ret < 0) |
|
1136 |
return ret; |
|
1137 |
|
|
1138 |
ret = kvm_get_xcrs(env); |
|
1006 | 1139 |
if (ret < 0) |
1007 | 1140 |
return ret; |
1008 | 1141 |
|
... | ... | |
1290 | 1423 |
(len_code[hw_breakpoint[n].len] << (18 + n*4)); |
1291 | 1424 |
} |
1292 | 1425 |
} |
1426 |
/* Legal xcr0 for loading */ |
|
1427 |
env->xcr0 = 1; |
|
1293 | 1428 |
} |
1294 | 1429 |
#endif /* KVM_CAP_SET_GUEST_DEBUG */ |
1295 | 1430 |
|
b/target-i386/machine.c | ||
---|---|---|
47 | 47 |
#define VMSTATE_XMM_REGS(_field, _state, _n) \ |
48 | 48 |
VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_xmm_reg, XMMReg) |
49 | 49 |
|
50 |
/* YMMH format is the same as XMM */ |
|
51 |
static const VMStateDescription vmstate_ymmh_reg = { |
|
52 |
.name = "ymmh_reg", |
|
53 |
.version_id = 1, |
|
54 |
.minimum_version_id = 1, |
|
55 |
.minimum_version_id_old = 1, |
|
56 |
.fields = (VMStateField []) { |
|
57 |
VMSTATE_UINT64(XMM_Q(0), XMMReg), |
|
58 |
VMSTATE_UINT64(XMM_Q(1), XMMReg), |
|
59 |
VMSTATE_END_OF_LIST() |
|
60 |
} |
|
61 |
}; |
|
62 |
|
|
63 |
#define VMSTATE_YMMH_REGS_VARS(_field, _state, _n, _v) \ |
|
64 |
VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_ymmh_reg, XMMReg) |
|
65 |
|
|
50 | 66 |
static const VMStateDescription vmstate_mtrr_var = { |
51 | 67 |
.name = "mtrr_var", |
52 | 68 |
.version_id = 1, |
... | ... | |
453 | 469 |
/* KVM pvclock msr */ |
454 | 470 |
VMSTATE_UINT64_V(system_time_msr, CPUState, 11), |
455 | 471 |
VMSTATE_UINT64_V(wall_clock_msr, CPUState, 11), |
472 |
/* XSAVE related fields */ |
|
473 |
VMSTATE_UINT64_V(xcr0, CPUState, 12), |
|
474 |
VMSTATE_UINT64_V(xstate_bv, CPUState, 12), |
|
475 |
VMSTATE_YMMH_REGS_VARS(ymmh_regs, CPUState, CPU_NB_REGS, 12), |
|
456 | 476 |
VMSTATE_END_OF_LIST() |
457 | 477 |
/* The above list is not sorted /wrt version numbers, watch out! */ |
458 | 478 |
} |
Also available in: Unified diff