Revision b9bec74b target-i386/kvm.c

--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -150,34 +150,34 @@
 
 #ifdef CONFIG_KVM_PARA
 struct kvm_para_features {
-        int cap;
-        int feature;
+    int cap;
+    int feature;
 } para_features[] = {
 #ifdef KVM_CAP_CLOCKSOURCE
-        { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
+    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
 #endif
 #ifdef KVM_CAP_NOP_IO_DELAY
-        { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
+    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
 #endif
 #ifdef KVM_CAP_PV_MMU
-        { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
+    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
 #endif
 #ifdef KVM_CAP_ASYNC_PF
-        { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
+    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
 #endif
-        { -1, -1 }
+    { -1, -1 }
 };
 
 static int get_para_features(CPUState *env)
 {
-        int i, features = 0;
+    int i, features = 0;
 
-        for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
-                if (kvm_check_extension(env->kvm_state, para_features[i].cap))
-                        features |= (1 << para_features[i].feature);
+    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
+        if (kvm_check_extension(env->kvm_state, para_features[i].cap)) {
+            features |= (1 << para_features[i].feature);
         }
-
-        return features;
+    }
+    return features;
 }
 #endif
 
@@ -389,13 +389,15 @@
                 c->index = j;
                 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
 
-                if (i == 4 && c->eax == 0)
+                if (i == 4 && c->eax == 0) {
                     break;
-                if (i == 0xb && !(c->ecx & 0xff00))
+                }
+                if (i == 0xb && !(c->ecx & 0xff00)) {
                     break;
-                if (i == 0xd && c->eax == 0)
+                }
+                if (i == 0xd && c->eax == 0) {
                     break;
-
+                }
                 c = &cpuid_data.entries[cpuid_i++];
             }
             break;
@@ -425,17 +427,18 @@
         uint64_t mcg_cap;
         int banks;
 
-        if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks))
+        if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks)) {
             perror("kvm_get_mce_cap_supported FAILED");
-        else {
+        } else {
             if (banks > MCE_BANKS_DEF)
                 banks = MCE_BANKS_DEF;
             mcg_cap &= MCE_CAP_DEF;
             mcg_cap |= banks;
-            if (kvm_setup_mce(env, &mcg_cap))
+            if (kvm_setup_mce(env, &mcg_cap)) {
                 perror("kvm_setup_mce FAILED");
-            else
+            } else {
                 env->mcg_cap = mcg_cap;
+            }
         }
     }
 #endif
@@ -577,7 +580,7 @@
 
     return kvm_init_identity_map_page(s);
 }
-                    
+
 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
 {
     lhs->selector = rhs->selector;
@@ -616,23 +619,23 @@
     lhs->selector = rhs->selector;
     lhs->base = rhs->base;
     lhs->limit = rhs->limit;
-    lhs->flags =
-	(rhs->type << DESC_TYPE_SHIFT)
-	| (rhs->present * DESC_P_MASK)
-	| (rhs->dpl << DESC_DPL_SHIFT)
-	| (rhs->db << DESC_B_SHIFT)
-	| (rhs->s * DESC_S_MASK)
-	| (rhs->l << DESC_L_SHIFT)
-	| (rhs->g * DESC_G_MASK)
-	| (rhs->avl * DESC_AVL_MASK);
+    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
+                 (rhs->present * DESC_P_MASK) |
+                 (rhs->dpl << DESC_DPL_SHIFT) |
+                 (rhs->db << DESC_B_SHIFT) |
+                 (rhs->s * DESC_S_MASK) |
+                 (rhs->l << DESC_L_SHIFT) |
+                 (rhs->g * DESC_G_MASK) |
+                 (rhs->avl * DESC_AVL_MASK);
 }
 
 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
 {
-    if (set)
+    if (set) {
         *kvm_reg = *qemu_reg;
-    else
+    } else {
         *qemu_reg = *kvm_reg;
+    }
 }
 
 static int kvm_getput_regs(CPUState *env, int set)
@@ -642,8 +645,9 @@
 
     if (!set) {
         ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
-        if (ret < 0)
+        if (ret < 0) {
             return ret;
+        }
     }
 
     kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
@@ -668,8 +672,9 @@
     kvm_getput_reg(&regs.rflags, &env->eflags, set);
     kvm_getput_reg(&regs.rip, &env->eip, set);
 
-    if (set)
+    if (set) {
         ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
+    }
 
     return ret;
 }
@@ -683,8 +688,9 @@
     fpu.fsw = env->fpus & ~(7 << 11);
     fpu.fsw |= (env->fpstt & 7) << 11;
     fpu.fcw = env->fpuc;
-    for (i = 0; i < 8; ++i)
-	fpu.ftwx |= (!env->fptags[i]) << i;
+    for (i = 0; i < 8; ++i) {
+        fpu.ftwx |= (!env->fptags[i]) << i;
+    }
     memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
     memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
     fpu.mxcsr = env->mxcsr;
@@ -709,8 +715,9 @@
     struct kvm_xsave* xsave;
     uint16_t cwd, swd, twd, fop;
 
-    if (!kvm_has_xsave())
+    if (!kvm_has_xsave()) {
         return kvm_put_fpu(env);
+    }
 
     xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
     memset(xsave, 0, sizeof(struct kvm_xsave));
@@ -718,8 +725,9 @@
     swd = env->fpus & ~(7 << 11);
     swd |= (env->fpstt & 7) << 11;
     cwd = env->fpuc;
-    for (i = 0; i < 8; ++i)
+    for (i = 0; i < 8; ++i) {
         twd |= (!env->fptags[i]) << i;
+    }
     xsave->region[0] = (uint32_t)(swd << 16) + cwd;
     xsave->region[1] = (uint32_t)(fop << 16) + twd;
     memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
@@ -743,8 +751,9 @@
 #ifdef KVM_CAP_XCRS
     struct kvm_xcrs xcrs;
 
-    if (!kvm_has_xcrs())
+    if (!kvm_has_xcrs()) {
         return 0;
+    }
 
     xcrs.nr_xcrs = 1;
     xcrs.flags = 0;
@@ -767,19 +776,19 @@
     }
 
     if ((env->eflags & VM_MASK)) {
-	    set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
-	    set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
-	    set_v8086_seg(&sregs.es, &env->segs[R_ES]);
-	    set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
-	    set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
-	    set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
+        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
+        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
+        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
+        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
+        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
+        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
     } else {
-	    set_seg(&sregs.cs, &env->segs[R_CS]);
-	    set_seg(&sregs.ds, &env->segs[R_DS]);
-	    set_seg(&sregs.es, &env->segs[R_ES]);
-	    set_seg(&sregs.fs, &env->segs[R_FS]);
-	    set_seg(&sregs.gs, &env->segs[R_GS]);
-	    set_seg(&sregs.ss, &env->segs[R_SS]);
+        set_seg(&sregs.cs, &env->segs[R_CS]);
+        set_seg(&sregs.ds, &env->segs[R_DS]);
+        set_seg(&sregs.es, &env->segs[R_ES]);
+        set_seg(&sregs.fs, &env->segs[R_FS]);
+        set_seg(&sregs.gs, &env->segs[R_GS]);
+        set_seg(&sregs.ss, &env->segs[R_SS]);
     }
 
     set_seg(&sregs.tr, &env->tr);
@@ -822,10 +831,12 @@
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
-    if (kvm_has_msr_star(env))
-	kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
-    if (kvm_has_msr_hsave_pa(env))
+    if (kvm_has_msr_star(env)) {
+        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
+    }
+    if (kvm_has_msr_hsave_pa(env)) {
         kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
+    }
 #ifdef TARGET_X86_64
     if (lm_capable_kernel) {
         kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
@@ -854,13 +865,15 @@
 #ifdef KVM_CAP_MCE
     if (env->mcg_cap) {
         int i;
-        if (level == KVM_PUT_RESET_STATE)
+
+        if (level == KVM_PUT_RESET_STATE) {
             kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
-        else if (level == KVM_PUT_FULL_STATE) {
+        } else if (level == KVM_PUT_FULL_STATE) {
             kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
             kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
-            for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
+            for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
                 kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
+            }
         }
     }
 #endif
@@ -878,14 +891,16 @@
     int i, ret;
 
     ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
+    }
 
     env->fpstt = (fpu.fsw >> 11) & 7;
     env->fpus = fpu.fsw;
     env->fpuc = fpu.fcw;
-    for (i = 0; i < 8; ++i)
-	env->fptags[i] = !((fpu.ftwx >> i) & 1);
+    for (i = 0; i < 8; ++i) {
+        env->fptags[i] = !((fpu.ftwx >> i) & 1);
+    }
     memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
     memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
     env->mxcsr = fpu.mxcsr;
@@ -900,8 +915,9 @@
     int ret, i;
     uint16_t cwd, swd, twd, fop;
 
-    if (!kvm_has_xsave())
+    if (!kvm_has_xsave()) {
         return kvm_get_fpu(env);
+    }
 
     xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
@@ -917,8 +933,9 @@
     env->fpstt = (swd >> 11) & 7;
     env->fpus = swd;
     env->fpuc = cwd;
-    for (i = 0; i < 8; ++i)
+    for (i = 0; i < 8; ++i) {
         env->fptags[i] = !((twd >> i) & 1);
+    }
     env->mxcsr = xsave->region[XSAVE_MXCSR];
     memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
             sizeof env->fpregs);
@@ -940,19 +957,22 @@
     int i, ret;
     struct kvm_xcrs xcrs;
 
-    if (!kvm_has_xcrs())
+    if (!kvm_has_xcrs()) {
         return 0;
+    }
 
     ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
+    }
 
-    for (i = 0; i < xcrs.nr_xcrs; i++)
+    for (i = 0; i < xcrs.nr_xcrs; i++) {
         /* Only support xcr0 now */
         if (xcrs.xcrs[0].xcr == 0) {
             env->xcr0 = xcrs.xcrs[0].value;
             break;
         }
+    }
     return 0;
 #else
     return 0;
@@ -966,8 +986,9 @@
     int bit, i, ret;
 
     ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
+    }
 
     /* There can only be one pending IRQ set in the bitmap at a time, so try
        to find it and save its number instead (-1 for none). */
@@ -1005,21 +1026,19 @@
     env->efer = sregs.efer;
     //cpu_set_apic_tpr(env->apic_state, sregs.cr8);
 
-#define HFLAG_COPY_MASK ~( \
-			HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
-			HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
-			HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
-			HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
-
-
+#define HFLAG_COPY_MASK \
+    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
+       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
+       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
+       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
 
     hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
     hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
     hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
-	    (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
+                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
     hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
     hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
-	    (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
+                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
 
     if (env->efer & MSR_EFER_LMA) {
         hflags |= HF_LMA_MASK;
@@ -1029,19 +1048,16 @@
         hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
     } else {
         hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
-		(DESC_B_SHIFT - HF_CS32_SHIFT);
+                    (DESC_B_SHIFT - HF_CS32_SHIFT);
         hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
-		(DESC_B_SHIFT - HF_SS32_SHIFT);
-        if (!(env->cr[0] & CR0_PE_MASK) ||
-                   (env->eflags & VM_MASK) ||
-                   !(hflags & HF_CS32_MASK)) {
-                hflags |= HF_ADDSEG_MASK;
-            } else {
-                hflags |= ((env->segs[R_DS].base |
-                                env->segs[R_ES].base |
-                                env->segs[R_SS].base) != 0) <<
-                    HF_ADDSEG_SHIFT;
-            }
+                    (DESC_B_SHIFT - HF_SS32_SHIFT);
+        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
+            !(hflags & HF_CS32_MASK)) {
+            hflags |= HF_ADDSEG_MASK;
+        } else {
+            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
+                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
+        }
     }
     env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
 
@@ -1061,10 +1077,12 @@
     msrs[n++].index = MSR_IA32_SYSENTER_CS;
     msrs[n++].index = MSR_IA32_SYSENTER_ESP;
     msrs[n++].index = MSR_IA32_SYSENTER_EIP;
-    if (kvm_has_msr_star(env))
-	msrs[n++].index = MSR_STAR;
-    if (kvm_has_msr_hsave_pa(env))
+    if (kvm_has_msr_star(env)) {
+        msrs[n++].index = MSR_STAR;
+    }
+    if (kvm_has_msr_hsave_pa(env)) {
         msrs[n++].index = MSR_VM_HSAVE_PA;
+    }
     msrs[n++].index = MSR_IA32_TSC;
 #ifdef TARGET_X86_64
     if (lm_capable_kernel) {
@@ -1084,15 +1102,17 @@
     if (env->mcg_cap) {
         msrs[n++].index = MSR_MCG_STATUS;
         msrs[n++].index = MSR_MCG_CTL;
-        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
+        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
             msrs[n++].index = MSR_MC0_CTL + i;
+        }
     }
 #endif
 
     msr_data.info.nmsrs = n;
     ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
+    }
 
     for (i = 0; i < ret; i++) {
         switch (msrs[i].index) {
@@ -1320,7 +1340,7 @@
 
     ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
     if (ret < 0) {
-       return ret;
+        return ret;
     }
     for (i = 0; i < 4; i++) {
         env->dr[i] = dbgregs.db[i];
@@ -1339,44 +1359,44 @@
     assert(cpu_is_stopped(env) || qemu_cpu_self(env));
 
     ret = kvm_getput_regs(env, 1);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_put_xsave(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_put_xcrs(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_put_sregs(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_put_msrs(env, level);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     if (level >= KVM_PUT_RESET_STATE) {
         ret = kvm_put_mp_state(env);
-        if (ret < 0)
+        if (ret < 0) {
             return ret;
+        }
     }
-
     ret = kvm_put_vcpu_events(env, level);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     /* must be last */
     ret = kvm_guest_debug_workarounds(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_put_debugregs(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     return 0;
 }
 
@@ -1387,37 +1407,37 @@
     assert(cpu_is_stopped(env) || qemu_cpu_self(env));
 
     ret = kvm_getput_regs(env, 0);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_get_xsave(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_get_xcrs(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_get_sregs(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_get_msrs(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_get_mp_state(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_get_vcpu_events(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     ret = kvm_get_debugregs(env);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
-
+    }
     return 0;
 }
 
@@ -1451,10 +1471,11 @@
      * interrupt, request an interrupt window exit.  This will
      * cause a return to userspace as soon as the guest is ready to
      * receive interrupts. */
-    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
+    if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
         run->request_interrupt_window = 1;
-    else
+    } else {
         run->request_interrupt_window = 0;
+    }
 
     DPRINTF("setting tpr\n");
     run->cr8 = cpu_get_apic_tpr(env->apic_state);
@@ -1464,11 +1485,11 @@
 
 int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
 {
-    if (run->if_flag)
+    if (run->if_flag) {
         env->eflags |= IF_MASK;
-    else
+    } else {
         env->eflags &= ~IF_MASK;
-    
+    }
     cpu_set_apic_tpr(env->apic_state, run->cr8);
     cpu_set_apic_base(env->apic_state, run->apic_base);
 
@@ -1524,8 +1545,9 @@
     static const uint8_t int3 = 0xcc;
 
     if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
-        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
+        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) {
         return -EINVAL;
+    }
     return 0;
 }
 
@@ -1534,8 +1556,9 @@
     uint8_t int3;
 
     if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
-        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
+        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
         return -EINVAL;
+    }
     return 0;
 }
 
@@ -1551,10 +1574,12 @@
 {
     int n;
 
-    for (n = 0; n < nb_hw_breakpoint; n++)
+    for (n = 0; n < nb_hw_breakpoint; n++) {
         if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
-            (hw_breakpoint[n].len == len || len == -1))
+            (hw_breakpoint[n].len == len || len == -1)) {
             return n;
+        }
+    }
     return -1;
 }
 
@@ -1573,8 +1598,9 @@
         case 2:
         case 4:
         case 8:
-            if (addr & (len - 1))
+            if (addr & (len - 1)) {
                 return -EINVAL;
+            }
             break;
         default:
             return -EINVAL;
@@ -1584,12 +1610,12 @@
         return -ENOSYS;
     }
 
-    if (nb_hw_breakpoint == 4)
+    if (nb_hw_breakpoint == 4) {
         return -ENOBUFS;
-
-    if (find_hw_breakpoint(addr, len, type) >= 0)
+    }
+    if (find_hw_breakpoint(addr, len, type) >= 0) {
         return -EEXIST;
-
+    }
     hw_breakpoint[nb_hw_breakpoint].addr = addr;
     hw_breakpoint[nb_hw_breakpoint].len = len;
     hw_breakpoint[nb_hw_breakpoint].type = type;
@@ -1604,9 +1630,9 @@
     int n;
 
     n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
-    if (n < 0)
+    if (n < 0) {
         return -ENOENT;
-
+    }
     nb_hw_breakpoint--;
     hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
 
@@ -1627,11 +1653,12 @@
 
     if (arch_info->exception == 1) {
         if (arch_info->dr6 & (1 << 14)) {
-            if (cpu_single_env->singlestep_enabled)
+            if (cpu_single_env->singlestep_enabled) {
                 handle = 1;
+            }
         } else {
-            for (n = 0; n < 4; n++)
-                if (arch_info->dr6 & (1 << n))
+            for (n = 0; n < 4; n++) {
+                if (arch_info->dr6 & (1 << n)) {
                     switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                     case 0x0:
                         handle = 1;
@@ -1649,10 +1676,12 @@
                         hw_watchpoint.flags = BP_MEM_ACCESS;
                         break;
                     }
+                }
+            }
         }
-    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
+    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
         handle = 1;
-
+    }
     if (!handle) {
         cpu_synchronize_state(cpu_single_env);
         assert(cpu_single_env->exception_injected == -1);
@@ -1676,9 +1705,9 @@
     };
     int n;
 
-    if (kvm_sw_breakpoints_active(env))
+    if (kvm_sw_breakpoints_active(env)) {
         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
-
+    }
     if (nb_hw_breakpoint > 0) {
         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
         dbg->arch.debugreg[7] = 0x0600;
@@ -1696,8 +1725,8 @@
 
 bool kvm_arch_stop_on_emulation_error(CPUState *env)
 {
-      return !(env->cr[0] & CR0_PE_MASK) ||
-              ((env->segs[R_CS].selector  & 3) != 3);
+    return !(env->cr[0] & CR0_PE_MASK) ||
+           ((env->segs[R_CS].selector  & 3) != 3);
 }
 
 static void hardware_memory_error(void)
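Note: every hunk in this revision applies the same two mechanical rules from QEMU's CODING_STYLE, with no behavioral change: indent with four spaces instead of tabs, and put braces around every if/else/for body, even a single statement. A minimal before/after sketch of the brace rule (illustrative only, not a hunk from this commit):

    /* before: unbraced single-statement bodies */
    if (ret < 0)
        return ret;

    /* after: every branch body takes braces */
    if (ret < 0) {
        return ret;
    }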
