root / target-i386 / smm_helper.c @ 78c3c6d3
History | View | Annotate | Download (10.5 kB)
1 |
/*
|
---|---|
2 |
* x86 SMM helpers
|
3 |
*
|
4 |
* Copyright (c) 2003 Fabrice Bellard
|
5 |
*
|
6 |
* This library is free software; you can redistribute it and/or
|
7 |
* modify it under the terms of the GNU Lesser General Public
|
8 |
* License as published by the Free Software Foundation; either
|
9 |
* version 2 of the License, or (at your option) any later version.
|
10 |
*
|
11 |
* This library is distributed in the hope that it will be useful,
|
12 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
* Lesser General Public License for more details.
|
15 |
*
|
16 |
* You should have received a copy of the GNU Lesser General Public
|
17 |
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
18 |
*/
|
19 |
|
20 |
#include "cpu.h" |
21 |
#include "helper.h" |
22 |
|
23 |
/* SMM support */
|
24 |
|
25 |
#if defined(CONFIG_USER_ONLY)
|
26 |
|
27 |
/* SMM cannot be entered under user-mode emulation (no SMRAM, no machine
   state to save), so this is a no-op stub. */
void do_smm_enter(CPUX86State *env)
{
}
30 |
|
31 |
/* RSM is meaningless under user-mode emulation (SMM is never entered),
   so this is a no-op stub. */
void helper_rsm(CPUX86State *env)
{
}
34 |
|
35 |
#else
|
36 |
|
37 |
#ifdef TARGET_X86_64
|
38 |
#define SMM_REVISION_ID 0x00020064 |
39 |
#else
|
40 |
#define SMM_REVISION_ID 0x00020000 |
41 |
#endif
|
42 |
|
43 |
/* Enter System Management Mode: save the current CPU state into the
 * SMRAM state save area at SMBASE + 0x8000, then reset the CPU to the
 * SMM entry environment (real-mode-like segments based at SMBASE,
 * EIP = 0x8000, paging and protection disabled).
 *
 * The save-area offsets mirror what helper_rsm() reads back; the layout
 * differs between the TARGET_X86_64 and 32-bit builds (see the two
 * SMM_REVISION_ID values above).
 */
void do_smm_enter(CPUX86State *env)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, CPU_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    /* All offsets below are relative to the start of the save area. */
    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* 64-bit format save area (revision 0x00020064). */
    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        /* Descriptor attributes packed into 16 bits; helper_rsm() undoes
           this with (value & 0xf0ff) << 8. */
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, env->regs[R_EAX]);
    stq_phys(sm_state + 0x7ff0, env->regs[R_ECX]);
    stq_phys(sm_state + 0x7fe8, env->regs[R_EDX]);
    stq_phys(sm_state + 0x7fe0, env->regs[R_EBX]);
    stq_phys(sm_state + 0x7fd8, env->regs[R_ESP]);
    stq_phys(sm_state + 0x7fd0, env->regs[R_EBP]);
    stq_phys(sm_state + 0x7fc8, env->regs[R_ESI]);
    /* env->regs[R_EDI], not the legacy EDI macro, for consistency with
       the other GPR accesses above. */
    stq_phys(sm_state + 0x7fc0, env->regs[R_EDI]);
    for (i = 8; i < 16; i++) {
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    }
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env));
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* 32-bit format save area (revision 0x00020000). */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, cpu_compute_eflags(env));
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, env->regs[R_EDI]);
    stl_phys(sm_state + 0x7fe8, env->regs[R_ESI]);
    stl_phys(sm_state + 0x7fe4, env->regs[R_EBP]);
    stl_phys(sm_state + 0x7fe0, env->regs[R_ESP]);
    stl_phys(sm_state + 0x7fdc, env->regs[R_EBX]);
    stl_phys(sm_state + 0x7fd8, env->regs[R_EDX]);
    stl_phys(sm_state + 0x7fd4, env->regs[R_ECX]);
    stl_phys(sm_state + 0x7fd0, env->regs[R_EAX]);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        /* Descriptor records for ES/CS/SS and DS/FS/GS live in two
           separate runs; selectors are stored in a third, contiguous
           table at 0x7fa8. */
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
                              DF_MASK));
    env->eip = 0x00008000;
    /* CS selector is SMBASE >> 4 with base = SMBASE, so execution starts
       at SMBASE + 0x8000; all segments get a 4 GB limit. */
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    /* Drop protection, emulation, task-switched and paging bits; SMM
       starts in a real-mode-like environment. */
    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
                                      CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;  /* DR7 architectural reset value */
    CC_OP = CC_OP_EFLAGS;
}
179 |
|
180 |
/* Return from System Management Mode: reload the complete CPU state from
 * the SMRAM state save area at SMBASE + 0x8000, undoing do_smm_enter().
 *
 * If the saved revision ID has bit 17 set (SMBASE relocation supported),
 * a new SMBASE value is also read back, aligned down to 32 KB.
 */
void helper_rsm(CPUX86State *env)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        /* (value & 0xf0ff) << 8 reverses the attribute packing done in
           do_smm_enter(). */
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    env->regs[R_EAX] = ldq_phys(sm_state + 0x7ff8);
    env->regs[R_ECX] = ldq_phys(sm_state + 0x7ff0);
    env->regs[R_EDX] = ldq_phys(sm_state + 0x7fe8);
    env->regs[R_EBX] = ldq_phys(sm_state + 0x7fe0);
    env->regs[R_ESP] = ldq_phys(sm_state + 0x7fd8);
    env->regs[R_EBP] = ldq_phys(sm_state + 0x7fd0);
    env->regs[R_ESI] = ldq_phys(sm_state + 0x7fc8);
    /* env->regs[R_EDI], not the legacy EDI macro, for consistency with
       the other GPR accesses above. */
    env->regs[R_EDI] = ldq_phys(sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    }
    env->eip = ldq_phys(sm_state + 0x7f78);
    cpu_load_eflags(env, ldl_phys(sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* SMBASE relocation: new base must be 32 KB aligned. */
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    cpu_load_eflags(env, ldl_phys(sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    env->regs[R_EDI] = ldl_phys(sm_state + 0x7fec);
    env->regs[R_ESI] = ldl_phys(sm_state + 0x7fe8);
    env->regs[R_EBP] = ldl_phys(sm_state + 0x7fe4);
    env->regs[R_ESP] = ldl_phys(sm_state + 0x7fe0);
    env->regs[R_EBX] = ldl_phys(sm_state + 0x7fdc);
    env->regs[R_EDX] = ldl_phys(sm_state + 0x7fd8);
    env->regs[R_ECX] = ldl_phys(sm_state + 0x7fd4);
    env->regs[R_EAX] = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        /* Same split descriptor layout as the store side: ES/CS/SS in one
           run, DS/FS/GS in another, selectors in a table at 0x7fa8. */
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* SMBASE relocation: new base must be 32 KB aligned. */
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, CPU_DUMP_CCOP);
}
300 |
|
301 |
#endif /* !CONFIG_USER_ONLY */ |