/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "cpu-all.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

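/* Store/load a segment register to/from a vmcb_seg structure in guest
   physical memory.  The VMCB "attrib" field keeps the descriptor
   attributes in its low 12 bits, while QEMU's SegmentCache keeps them in
   bits 8-15 and 20-23 of "flags", hence the repacking below. */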
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

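/* VMRUN: save the host state to the hsave area, load the guest state and
   intercept settings from the VMCB pointed to by rAX, then inject a
   pending event from the VMCB if one is flagged as valid. */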
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

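    /* load the guest state from the VMCB */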
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb,
                                                          save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

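/* Raise a #VMEXIT if the given intercept is enabled for the current guest;
   does nothing when not running under SVM (HF_SVMI_MASK clear). */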
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

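            /* Two bits per MSR in the permission map, laid out in three 2K
               blocks for the 0x0, 0xc0000000 and 0xc0010000 MSR ranges:
               t1 is the byte offset, t0 the bit offset within that byte,
               and param selects the read (0) or write (1) intercept bit. */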
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
531 6bada5e8 Blue Swirl
                         uint32_t next_eip_addend)
532 6bada5e8 Blue Swirl
{
533 6bada5e8 Blue Swirl
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
534 6bada5e8 Blue Swirl
        /* FIXME: this should be read in at vmrun (faster this way?) */
535 6bada5e8 Blue Swirl
        uint64_t addr = ldq_phys(env->vm_vmcb +
536 6bada5e8 Blue Swirl
                                 offsetof(struct vmcb, control.iopm_base_pa));
537 6bada5e8 Blue Swirl
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
538 6bada5e8 Blue Swirl
539 6bada5e8 Blue Swirl
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
540 6bada5e8 Blue Swirl
            /* next EIP */
541 6bada5e8 Blue Swirl
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
542 6bada5e8 Blue Swirl
                     env->eip + next_eip_addend);
543 052e80d5 Blue Swirl
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
544 6bada5e8 Blue Swirl
        }
545 6bada5e8 Blue Swirl
    }
546 6bada5e8 Blue Swirl
}
547 6bada5e8 Blue Swirl
548 6bada5e8 Blue Swirl
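/* #VMEXIT: write the guest state and exit information back into the VMCB,
   restore the host state from the hsave area and continue execution at the
   host rIP saved by VMRUN. */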
/* Note: currently only 32 bits of exit_code are used */
549 052e80d5 Blue Swirl
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
550 6bada5e8 Blue Swirl
{
551 6bada5e8 Blue Swirl
    uint32_t int_ctl;
552 6bada5e8 Blue Swirl
553 6bada5e8 Blue Swirl
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
554 6bada5e8 Blue Swirl
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
555 6bada5e8 Blue Swirl
                  exit_code, exit_info_1,
556 6bada5e8 Blue Swirl
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
557 6bada5e8 Blue Swirl
                                                   control.exit_info_2)),
558 6bada5e8 Blue Swirl
                  EIP);
559 6bada5e8 Blue Swirl
560 6bada5e8 Blue Swirl
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
561 6bada5e8 Blue Swirl
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
562 6bada5e8 Blue Swirl
                 SVM_INTERRUPT_SHADOW_MASK);
563 6bada5e8 Blue Swirl
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
564 6bada5e8 Blue Swirl
    } else {
565 6bada5e8 Blue Swirl
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
566 6bada5e8 Blue Swirl
    }
567 6bada5e8 Blue Swirl
568 6bada5e8 Blue Swirl
    /* Save the VM state in the vmcb */
569 052e80d5 Blue Swirl
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
570 6bada5e8 Blue Swirl
                 &env->segs[R_ES]);
571 052e80d5 Blue Swirl
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
572 6bada5e8 Blue Swirl
                 &env->segs[R_CS]);
573 052e80d5 Blue Swirl
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
574 6bada5e8 Blue Swirl
                 &env->segs[R_SS]);
575 052e80d5 Blue Swirl
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
576 6bada5e8 Blue Swirl
                 &env->segs[R_DS]);
577 6bada5e8 Blue Swirl
578 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
579 6bada5e8 Blue Swirl
             env->gdt.base);
580 6bada5e8 Blue Swirl
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
581 6bada5e8 Blue Swirl
             env->gdt.limit);
582 6bada5e8 Blue Swirl
583 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
584 6bada5e8 Blue Swirl
             env->idt.base);
585 6bada5e8 Blue Swirl
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
586 6bada5e8 Blue Swirl
             env->idt.limit);
587 6bada5e8 Blue Swirl
588 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
589 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
590 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
591 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
592 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
593 6bada5e8 Blue Swirl
594 6bada5e8 Blue Swirl
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
595 6bada5e8 Blue Swirl
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
596 6bada5e8 Blue Swirl
    int_ctl |= env->v_tpr & V_TPR_MASK;
597 6bada5e8 Blue Swirl
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ) {
598 6bada5e8 Blue Swirl
        int_ctl |= V_IRQ_MASK;
599 6bada5e8 Blue Swirl
    }
600 6bada5e8 Blue Swirl
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
601 6bada5e8 Blue Swirl
602 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
603 6bada5e8 Blue Swirl
             cpu_compute_eflags(env));
604 052e80d5 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
605 052e80d5 Blue Swirl
             env->eip);
606 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
607 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
608 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
609 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
610 6bada5e8 Blue Swirl
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
611 6bada5e8 Blue Swirl
             env->hflags & HF_CPL_MASK);
612 6bada5e8 Blue Swirl
613 6bada5e8 Blue Swirl
    /* Reload the host state from vm_hsave */
614 6bada5e8 Blue Swirl
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
615 6bada5e8 Blue Swirl
    env->hflags &= ~HF_SVMI_MASK;
616 6bada5e8 Blue Swirl
    env->intercept = 0;
617 6bada5e8 Blue Swirl
    env->intercept_exceptions = 0;
618 6bada5e8 Blue Swirl
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
619 6bada5e8 Blue Swirl
    env->tsc_offset = 0;
620 6bada5e8 Blue Swirl
621 6bada5e8 Blue Swirl
    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
622 6bada5e8 Blue Swirl
                                                       save.gdtr.base));
623 6bada5e8 Blue Swirl
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
624 6bada5e8 Blue Swirl
                                                       save.gdtr.limit));
625 6bada5e8 Blue Swirl
626 6bada5e8 Blue Swirl
    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
627 6bada5e8 Blue Swirl
                                                       save.idtr.base));
628 6bada5e8 Blue Swirl
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
629 6bada5e8 Blue Swirl
                                                       save.idtr.limit));
630 6bada5e8 Blue Swirl
631 6bada5e8 Blue Swirl
    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
632 6bada5e8 Blue Swirl
                                                              save.cr0)) |
633 6bada5e8 Blue Swirl
                       CR0_PE_MASK);
634 6bada5e8 Blue Swirl
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
635 6bada5e8 Blue Swirl
                                                              save.cr4)));
636 6bada5e8 Blue Swirl
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
637 6bada5e8 Blue Swirl
                                                              save.cr3)));
638 6bada5e8 Blue Swirl
    /* we need to set the efer after the crs so the hidden flags get
639 6bada5e8 Blue Swirl
       set properly */
640 6bada5e8 Blue Swirl
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
641 6bada5e8 Blue Swirl
                                                         save.efer)));
642 6bada5e8 Blue Swirl
    env->eflags = 0;
643 6bada5e8 Blue Swirl
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
644 6bada5e8 Blue Swirl
                                                           save.rflags)),
645 6bada5e8 Blue Swirl
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
646 6bada5e8 Blue Swirl
    CC_OP = CC_OP_EFLAGS;
647 6bada5e8 Blue Swirl
648 052e80d5 Blue Swirl
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
649 052e80d5 Blue Swirl
                       R_ES);
650 052e80d5 Blue Swirl
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
651 052e80d5 Blue Swirl
                       R_CS);
652 052e80d5 Blue Swirl
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
653 052e80d5 Blue Swirl
                       R_SS);
654 052e80d5 Blue Swirl
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
655 052e80d5 Blue Swirl
                       R_DS);
656 6bada5e8 Blue Swirl
657 6bada5e8 Blue Swirl
    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
658 6bada5e8 Blue Swirl
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
659 6bada5e8 Blue Swirl
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
660 6bada5e8 Blue Swirl
661 6bada5e8 Blue Swirl
    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
662 6bada5e8 Blue Swirl
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
663 6bada5e8 Blue Swirl
664 6bada5e8 Blue Swirl
    /* other setups */
665 6bada5e8 Blue Swirl
    cpu_x86_set_cpl(env, 0);
666 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
667 6bada5e8 Blue Swirl
             exit_code);
668 6bada5e8 Blue Swirl
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
669 6bada5e8 Blue Swirl
             exit_info_1);
670 6bada5e8 Blue Swirl
671 6bada5e8 Blue Swirl
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
672 6bada5e8 Blue Swirl
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
673 6bada5e8 Blue Swirl
                                              control.event_inj)));
674 6bada5e8 Blue Swirl
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
675 6bada5e8 Blue Swirl
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
676 6bada5e8 Blue Swirl
                                              control.event_inj_err)));
677 6bada5e8 Blue Swirl
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
678 6bada5e8 Blue Swirl
679 6bada5e8 Blue Swirl
    env->hflags2 &= ~HF2_GIF_MASK;
680 6bada5e8 Blue Swirl
    /* FIXME: Resets the current ASID register to zero (host ASID). */
681 6bada5e8 Blue Swirl
682 6bada5e8 Blue Swirl
    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
683 6bada5e8 Blue Swirl
684 6bada5e8 Blue Swirl
    /* Clears the TSC_OFFSET inside the processor. */
685 6bada5e8 Blue Swirl
686 6bada5e8 Blue Swirl
    /* If the host is in PAE mode, the processor reloads the host's PDPEs
687 6bada5e8 Blue Swirl
       from the page table indicated the host's CR3. If the PDPEs contain
688 6bada5e8 Blue Swirl
       illegal state, the processor causes a shutdown. */
689 6bada5e8 Blue Swirl
690 6bada5e8 Blue Swirl
    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
691 6bada5e8 Blue Swirl
    env->cr[0] |= CR0_PE_MASK;
692 6bada5e8 Blue Swirl
    env->eflags &= ~VM_MASK;
693 6bada5e8 Blue Swirl
694 6bada5e8 Blue Swirl
    /* Disables all breakpoints in the host DR7 register. */
695 6bada5e8 Blue Swirl
696 6bada5e8 Blue Swirl
    /* Checks the reloaded host state for consistency. */
697 6bada5e8 Blue Swirl
698 6bada5e8 Blue Swirl
    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
699 6bada5e8 Blue Swirl
       host's code segment or non-canonical (in the case of long mode), a
700 6bada5e8 Blue Swirl
       #GP fault is delivered inside the host. */
701 6bada5e8 Blue Swirl
702 6bada5e8 Blue Swirl
    /* remove any pending exception */
703 6bada5e8 Blue Swirl
    env->exception_index = -1;
704 6bada5e8 Blue Swirl
    env->error_code = 0;
705 6bada5e8 Blue Swirl
    env->old_exception = -1;
706 6bada5e8 Blue Swirl
707 6bada5e8 Blue Swirl
    cpu_loop_exit(env);
708 6bada5e8 Blue Swirl
}
709 6bada5e8 Blue Swirl
710 052e80d5 Blue Swirl
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
711 6bada5e8 Blue Swirl
{
712 052e80d5 Blue Swirl
    helper_vmexit(env, exit_code, exit_info_1);
713 6bada5e8 Blue Swirl
}
714 6bada5e8 Blue Swirl
715 6bada5e8 Blue Swirl
#endif