Revision 872929aa target-i386/svm.h

#ifndef __SVM_H
#define __SVM_H

enum {
	/* We shift all the intercept bits so we can OR them with the
	   TB flags later on */
	INTERCEPT_INTR = HF_HIF_SHIFT,
	INTERCEPT_NMI,
	INTERCEPT_SMI,
	INTERCEPT_INIT,
	INTERCEPT_VINTR,
	INTERCEPT_SELECTIVE_CR0,
	INTERCEPT_STORE_IDTR,
	INTERCEPT_STORE_GDTR,
	INTERCEPT_STORE_LDTR,
	INTERCEPT_STORE_TR,
	INTERCEPT_LOAD_IDTR,
	INTERCEPT_LOAD_GDTR,
	INTERCEPT_LOAD_LDTR,
	INTERCEPT_LOAD_TR,
	INTERCEPT_RDTSC,
	INTERCEPT_RDPMC,
	INTERCEPT_PUSHF,
	INTERCEPT_POPF,
	INTERCEPT_CPUID,
	INTERCEPT_RSM,
	INTERCEPT_IRET,
	INTERCEPT_INTn,
	INTERCEPT_INVD,
	INTERCEPT_PAUSE,
	INTERCEPT_HLT,
	INTERCEPT_INVLPG,
	INTERCEPT_INVLPGA,
	INTERCEPT_IOIO_PROT,
	INTERCEPT_MSR_PROT,
	INTERCEPT_TASK_SWITCH,
	INTERCEPT_FERR_FREEZE,
	INTERCEPT_SHUTDOWN,
	INTERCEPT_VMRUN,
	INTERCEPT_VMMCALL,
	INTERCEPT_VMLOAD,
	INTERCEPT_VMSAVE,
	INTERCEPT_STGI,
	INTERCEPT_CLGI,
	INTERCEPT_SKINIT,
	INTERCEPT_RDTSCP,
	INTERCEPT_ICEBP,
	INTERCEPT_WBINVD,
};
/* This is not really an intercept but rather a placeholder to
   show that we are running inside an SVM guest (just like a hidden
   flag, but it keeps the TBs clean) */
#define INTERCEPT_SVM 63
#define INTERCEPT_SVM_MASK (1ULL << INTERCEPT_SVM)
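
/* Illustrative sketch (assumed names, following the comment above):
 * because the INTERCEPT_* values are pre-shifted to the hflags bit
 * positions, intercept tests and TB-flag merging are plain bit ops.
 * env->intercept appears in the macros further down; flags is an
 * assumed local variable. */
#if 0
    if (env->intercept & (1ULL << INTERCEPT_HLT)) {
        /* HLT is intercepted: emit a #VMEXIT instead of halting */
    }
    flags |= env->intercept;    /* OR straight into the TB flags */
#endif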

struct __attribute__ ((__packed__)) vmcb_control_area {
	uint16_t intercept_cr_read;
	uint16_t intercept_cr_write;
	uint16_t intercept_dr_read;
	uint16_t intercept_dr_write;
	uint32_t intercept_exceptions;
	uint64_t intercept;
	uint8_t reserved_1[44];
	uint64_t iopm_base_pa;
	uint64_t msrpm_base_pa;
	uint64_t tsc_offset;
	uint32_t asid;
	uint8_t tlb_ctl;
	uint8_t reserved_2[3];
	uint32_t int_ctl;
	uint32_t int_vector;
	uint32_t int_state;
	uint8_t reserved_3[4];
	uint64_t exit_code;
	uint64_t exit_info_1;
	uint64_t exit_info_2;
	uint32_t exit_int_info;
	uint32_t exit_int_info_err;
	uint64_t nested_ctl;
	uint8_t reserved_4[16];
	uint32_t event_inj;
	uint32_t event_inj_err;
	uint64_t nested_cr3;
	uint64_t lbr_ctl;
	uint8_t reserved_5[832];
};
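
/* Illustrative sketch: control-area fields live in guest-physical
 * memory and are reached via offsetof into struct vmcb (defined
 * further down), just as the SVM_LOAD_SEG/SVM_SAVE_SEG macros at the
 * end of this file do for the save area. addr is an assumed VMCB
 * physical address. */
#if 0
    uint64_t exit_code = ldq_phys(addr + offsetof(struct vmcb, control.exit_code));
#endif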

#define TLB_CONTROL_DO_NOTHING 0
#define TLB_CONTROL_FLUSH_ALL_ASID 1

......
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
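
/* Illustrative sketch: decoding the operand and address size of an
 * intercepted IN/OUT from exit_info_1; the corresponding *_SHIFT
 * constants are defined in the lines elided from this hunk. */
#if 0
    unsigned op_size   = (exit_info_1 & SVM_IOIO_SIZE_MASK)  >> SVM_IOIO_SIZE_SHIFT;
    unsigned addr_size = (exit_info_1 & SVM_IOIO_ASIZE_MASK) >> SVM_IOIO_ASIZE_SHIFT;
#endif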

  
struct __attribute__ ((__packed__)) vmcb_seg {
	uint16_t selector;
	uint16_t attrib;
	uint32_t limit;
	uint64_t base;
};

struct __attribute__ ((__packed__)) vmcb_save_area {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg fs;
	struct vmcb_seg gs;
	struct vmcb_seg gdtr;
	struct vmcb_seg ldtr;
	struct vmcb_seg idtr;
	struct vmcb_seg tr;
	uint8_t reserved_1[43];
	uint8_t cpl;
	uint8_t reserved_2[4];
	uint64_t efer;
	uint8_t reserved_3[112];
	uint64_t cr4;
	uint64_t cr3;
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t reserved_4[88];
	uint64_t rsp;
	uint8_t reserved_5[24];
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernel_gs_base;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	/* qemu: cr8 added to reuse this as hsave */
	uint64_t cr8;
	uint8_t reserved_6[32 - 8]; /* originally 32 */
	uint64_t g_pat;
	uint64_t dbgctl;
	uint64_t br_from;
	uint64_t br_to;
	uint64_t last_excp_from;
	uint64_t last_excp_to;
};

struct __attribute__ ((__packed__)) vmcb {
	struct vmcb_control_area control;
	struct vmcb_save_area save;
};
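
/* Illustrative sketch: the packed control area above sums to 1024
 * bytes, so the save area lands at offset 0x400, matching AMD's VMCB
 * layout. A compile-time guard along these lines (not part of the
 * original header) would catch accidental drift: */
#if 0
typedef char vmcb_layout_check[(offsetof(struct vmcb, save) == 0x400) ? 1 : -1];
#endif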

#define SVM_CPUID_FEATURE_SHIFT 2
#define SVM_CPUID_FUNC 0x8000000a
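
/* Illustrative sketch: SVM support is advertised in CPUID leaf
 * 0x80000001 ECX at the shift above; SVM_CPUID_FUNC (0x8000000a)
 * then reports revision and ASID count. ecx_80000001 is an assumed
 * variable holding that leaf's ECX value. */
#if 0
    if (ecx_80000001 & (1 << SVM_CPUID_FEATURE_SHIFT)) {
        /* SVM present: query SVM_CPUID_FUNC for details */
    }
#endif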

#define MSR_EFER_SVME_MASK (1ULL << 12)
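
/* Illustrative sketch: a guest enables SVM by setting EFER.SVME
 * (bit 12) before VMRUN becomes legal; env->efer is an assumed
 * field name. */
#if 0
    env->efer |= MSR_EFER_SVME_MASK;
#endif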

#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
#define SVM_SELECTOR_AVL_SHIFT 8
#define SVM_SELECTOR_L_SHIFT 9
#define SVM_SELECTOR_DB_SHIFT 10
#define SVM_SELECTOR_G_SHIFT 11

#define SVM_SELECTOR_TYPE_MASK (0xf)
#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)

#define SVM_SELECTOR_WRITE_MASK (1 << 1)
#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
#define SVM_SELECTOR_CODE_MASK (1 << 3)
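
/* Illustrative sketch: composing the attribute word of a present,
 * writable, DPL-0 data segment from the masks above. */
#if 0
    uint16_t attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK
                    | SVM_SELECTOR_WRITE_MASK | (0 << SVM_SELECTOR_DPL_SHIFT);
#endif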

#define INTERCEPT_CR0_MASK 1
#define INTERCEPT_CR3_MASK (1 << 3)
#define INTERCEPT_CR4_MASK (1 << 4)

#define INTERCEPT_DR0_MASK 1
#define INTERCEPT_DR1_MASK (1 << 1)
#define INTERCEPT_DR2_MASK (1 << 2)
#define INTERCEPT_DR3_MASK (1 << 3)
#define INTERCEPT_DR4_MASK (1 << 4)
#define INTERCEPT_DR5_MASK (1 << 5)
#define INTERCEPT_DR6_MASK (1 << 6)
#define INTERCEPT_DR7_MASK (1 << 7)

#define SVM_EVTINJ_VEC_MASK 0xff

#define SVM_EVTINJ_TYPE_SHIFT 8
......

#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */

#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
#define SVM_VMRUN  ".byte 0x0f, 0x01, 0xd8"
#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb"
#define SVM_CLGI   ".byte 0x0f, 0x01, 0xdd"
#define SVM_STGI   ".byte 0x0f, 0x01, 0xdc"
#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
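
/* Illustrative sketch: the raw byte sequences above exist because
 * older assemblers lack the SVM mnemonics; they are meant to be
 * dropped into inline asm. VMRUN takes the VMCB physical address in
 * rAX; vmcb_pa is an assumed variable. */
#if 0
    asm volatile (SVM_STGI);
    asm volatile (SVM_VMRUN : : "a" (vmcb_pa));
#endif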

/* function references */

#define INTERCEPTED(mask) (env->intercept & (mask))
#define INTERCEPTEDw(var, mask) (env->intercept ## var & (mask))
#define INTERCEPTEDl(var, mask) (env->intercept ## var & (mask))
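
/* Illustrative sketch: the token pasting turns var into one of the
 * env->intercept_* fields mirrored from the VMCB control area. */
#if 0
    if (INTERCEPTED(1ULL << INTERCEPT_HLT)) {
        /* raise #VMEXIT */
    }
    if (INTERCEPTEDw(_cr_write, 1 << 0)) {
        /* CR0 write is intercepted */
    }
#endif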
  
#define SVM_LOAD_SEG(addr, seg_index, seg) \
    cpu_x86_load_seg_cache(env, \
                    R_##seg_index, \
                    lduw_phys(addr + offsetof(struct vmcb, save.seg.selector)),\
                    ldq_phys(addr + offsetof(struct vmcb, save.seg.base)),\
                    ldl_phys(addr + offsetof(struct vmcb, save.seg.limit)),\
                    vmcb2cpu_attrib(lduw_phys(addr + offsetof(struct vmcb, save.seg.attrib)), \
                                    ldq_phys(addr + offsetof(struct vmcb, save.seg.base)), \
                                    ldl_phys(addr + offsetof(struct vmcb, save.seg.limit))))
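
/* Illustrative sketch: loading the guest CS out of the VMCB;
 * R_##seg_index pastes to QEMU's R_CS constant. env->vm_vmcb is an
 * assumed field holding the current VMCB physical address. */
#if 0
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
#endif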
  
#define SVM_LOAD_SEG2(addr, seg_qemu, seg_vmcb) \
    env->seg_qemu.selector  = lduw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.selector)); \
    env->seg_qemu.base      = ldq_phys(addr + offsetof(struct vmcb, save.seg_vmcb.base)); \
    env->seg_qemu.limit     = ldl_phys(addr + offsetof(struct vmcb, save.seg_vmcb.limit)); \
    env->seg_qemu.flags     = vmcb2cpu_attrib(lduw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.attrib)), \
                                              env->seg_qemu.base, env->seg_qemu.limit)
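
/* Illustrative sketch: this variant fills a CPU segment structure
 * directly, which suits segments like ldtr/tr that bypass the
 * segment-cache loader. */
#if 0
    SVM_LOAD_SEG2(env->vm_vmcb, ldt, ldtr);
    SVM_LOAD_SEG2(env->vm_vmcb, tr, tr);
#endif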
  
#define SVM_SAVE_SEG(addr, seg_qemu, seg_vmcb) \
    stw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.selector), env->seg_qemu.selector); \
    stq_phys(addr + offsetof(struct vmcb, save.seg_vmcb.base), env->seg_qemu.base); \
    stl_phys(addr + offsetof(struct vmcb, save.seg_vmcb.limit), env->seg_qemu.limit); \
    stw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.attrib), cpu2vmcb_attrib(env->seg_qemu.flags))
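
/* Illustrative sketch: writing a guest segment back into the VMCB;
 * seg_qemu may name an indexed field such as segs[R_ES]. */
#if 0
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
#endif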
  
#endif
