root / cpus.c @ 355b1943
History | View | Annotate | Download (17.5 kB)
1 | 296af7c9 | Blue Swirl | /*
|
---|---|---|---|
2 | 296af7c9 | Blue Swirl | * QEMU System Emulator
|
3 | 296af7c9 | Blue Swirl | *
|
4 | 296af7c9 | Blue Swirl | * Copyright (c) 2003-2008 Fabrice Bellard
|
5 | 296af7c9 | Blue Swirl | *
|
6 | 296af7c9 | Blue Swirl | * Permission is hereby granted, free of charge, to any person obtaining a copy
|
7 | 296af7c9 | Blue Swirl | * of this software and associated documentation files (the "Software"), to deal
|
8 | 296af7c9 | Blue Swirl | * in the Software without restriction, including without limitation the rights
|
9 | 296af7c9 | Blue Swirl | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 | 296af7c9 | Blue Swirl | * copies of the Software, and to permit persons to whom the Software is
|
11 | 296af7c9 | Blue Swirl | * furnished to do so, subject to the following conditions:
|
12 | 296af7c9 | Blue Swirl | *
|
13 | 296af7c9 | Blue Swirl | * The above copyright notice and this permission notice shall be included in
|
14 | 296af7c9 | Blue Swirl | * all copies or substantial portions of the Software.
|
15 | 296af7c9 | Blue Swirl | *
|
16 | 296af7c9 | Blue Swirl | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
17 | 296af7c9 | Blue Swirl | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
18 | 296af7c9 | Blue Swirl | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
19 | 296af7c9 | Blue Swirl | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
20 | 296af7c9 | Blue Swirl | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
21 | 296af7c9 | Blue Swirl | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
22 | 296af7c9 | Blue Swirl | * THE SOFTWARE.
|
23 | 296af7c9 | Blue Swirl | */
|
24 | 296af7c9 | Blue Swirl | |
25 | 296af7c9 | Blue Swirl | /* Needed early for CONFIG_BSD etc. */
|
26 | 296af7c9 | Blue Swirl | #include "config-host.h" |
27 | 296af7c9 | Blue Swirl | |
28 | 296af7c9 | Blue Swirl | #include "monitor.h" |
29 | 296af7c9 | Blue Swirl | #include "sysemu.h" |
30 | 296af7c9 | Blue Swirl | #include "gdbstub.h" |
31 | 296af7c9 | Blue Swirl | #include "dma.h" |
32 | 296af7c9 | Blue Swirl | #include "kvm.h" |
33 | 296af7c9 | Blue Swirl | |
34 | 296af7c9 | Blue Swirl | #include "cpus.h" |
35 | 296af7c9 | Blue Swirl | |
36 | 296af7c9 | Blue Swirl | static CPUState *cur_cpu;
|
37 | 296af7c9 | Blue Swirl | static CPUState *next_cpu;
|
38 | 296af7c9 | Blue Swirl | |
39 | 296af7c9 | Blue Swirl | /***********************************************************/
|
40 | 296af7c9 | Blue Swirl | void hw_error(const char *fmt, ...) |
41 | 296af7c9 | Blue Swirl | { |
42 | 296af7c9 | Blue Swirl | va_list ap; |
43 | 296af7c9 | Blue Swirl | CPUState *env; |
44 | 296af7c9 | Blue Swirl | |
45 | 296af7c9 | Blue Swirl | va_start(ap, fmt); |
46 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu: hardware error: ");
|
47 | 296af7c9 | Blue Swirl | vfprintf(stderr, fmt, ap); |
48 | 296af7c9 | Blue Swirl | fprintf(stderr, "\n");
|
49 | 296af7c9 | Blue Swirl | for(env = first_cpu; env != NULL; env = env->next_cpu) { |
50 | 296af7c9 | Blue Swirl | fprintf(stderr, "CPU #%d:\n", env->cpu_index);
|
51 | 296af7c9 | Blue Swirl | #ifdef TARGET_I386
|
52 | 296af7c9 | Blue Swirl | cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU); |
53 | 296af7c9 | Blue Swirl | #else
|
54 | 296af7c9 | Blue Swirl | cpu_dump_state(env, stderr, fprintf, 0);
|
55 | 296af7c9 | Blue Swirl | #endif
|
56 | 296af7c9 | Blue Swirl | } |
57 | 296af7c9 | Blue Swirl | va_end(ap); |
58 | 296af7c9 | Blue Swirl | abort(); |
59 | 296af7c9 | Blue Swirl | } |
60 | 296af7c9 | Blue Swirl | |
61 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_states(void) |
62 | 296af7c9 | Blue Swirl | { |
63 | 296af7c9 | Blue Swirl | CPUState *cpu; |
64 | 296af7c9 | Blue Swirl | |
65 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
66 | 296af7c9 | Blue Swirl | cpu_synchronize_state(cpu); |
67 | 296af7c9 | Blue Swirl | } |
68 | 296af7c9 | Blue Swirl | } |
69 | 296af7c9 | Blue Swirl | |
70 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_post_reset(void) |
71 | 296af7c9 | Blue Swirl | { |
72 | 296af7c9 | Blue Swirl | CPUState *cpu; |
73 | 296af7c9 | Blue Swirl | |
74 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
75 | 296af7c9 | Blue Swirl | cpu_synchronize_post_reset(cpu); |
76 | 296af7c9 | Blue Swirl | } |
77 | 296af7c9 | Blue Swirl | } |
78 | 296af7c9 | Blue Swirl | |
79 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_post_init(void) |
80 | 296af7c9 | Blue Swirl | { |
81 | 296af7c9 | Blue Swirl | CPUState *cpu; |
82 | 296af7c9 | Blue Swirl | |
83 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
84 | 296af7c9 | Blue Swirl | cpu_synchronize_post_init(cpu); |
85 | 296af7c9 | Blue Swirl | } |
86 | 296af7c9 | Blue Swirl | } |
87 | 296af7c9 | Blue Swirl | |
88 | 296af7c9 | Blue Swirl | static void do_vm_stop(int reason) |
89 | 296af7c9 | Blue Swirl | { |
90 | 296af7c9 | Blue Swirl | if (vm_running) {
|
91 | 296af7c9 | Blue Swirl | cpu_disable_ticks(); |
92 | 296af7c9 | Blue Swirl | vm_running = 0;
|
93 | 296af7c9 | Blue Swirl | pause_all_vcpus(); |
94 | 296af7c9 | Blue Swirl | vm_state_notify(0, reason);
|
95 | 296af7c9 | Blue Swirl | monitor_protocol_event(QEVENT_STOP, NULL);
|
96 | 296af7c9 | Blue Swirl | } |
97 | 296af7c9 | Blue Swirl | } |
98 | 296af7c9 | Blue Swirl | |
99 | 296af7c9 | Blue Swirl | static int cpu_can_run(CPUState *env) |
100 | 296af7c9 | Blue Swirl | { |
101 | 296af7c9 | Blue Swirl | if (env->stop)
|
102 | 296af7c9 | Blue Swirl | return 0; |
103 | 296af7c9 | Blue Swirl | if (env->stopped)
|
104 | 296af7c9 | Blue Swirl | return 0; |
105 | 296af7c9 | Blue Swirl | if (!vm_running)
|
106 | 296af7c9 | Blue Swirl | return 0; |
107 | 296af7c9 | Blue Swirl | return 1; |
108 | 296af7c9 | Blue Swirl | } |
109 | 296af7c9 | Blue Swirl | |
110 | 296af7c9 | Blue Swirl | static int cpu_has_work(CPUState *env) |
111 | 296af7c9 | Blue Swirl | { |
112 | 296af7c9 | Blue Swirl | if (env->stop)
|
113 | 296af7c9 | Blue Swirl | return 1; |
114 | 296af7c9 | Blue Swirl | if (env->stopped)
|
115 | 296af7c9 | Blue Swirl | return 0; |
116 | 296af7c9 | Blue Swirl | if (!env->halted)
|
117 | 296af7c9 | Blue Swirl | return 1; |
118 | 296af7c9 | Blue Swirl | if (qemu_cpu_has_work(env))
|
119 | 296af7c9 | Blue Swirl | return 1; |
120 | 296af7c9 | Blue Swirl | return 0; |
121 | 296af7c9 | Blue Swirl | } |
122 | 296af7c9 | Blue Swirl | |
123 | 296af7c9 | Blue Swirl | static int tcg_has_work(void) |
124 | 296af7c9 | Blue Swirl | { |
125 | 296af7c9 | Blue Swirl | CPUState *env; |
126 | 296af7c9 | Blue Swirl | |
127 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) |
128 | 296af7c9 | Blue Swirl | if (cpu_has_work(env))
|
129 | 296af7c9 | Blue Swirl | return 1; |
130 | 296af7c9 | Blue Swirl | return 0; |
131 | 296af7c9 | Blue Swirl | } |
132 | 296af7c9 | Blue Swirl | |
133 | 296af7c9 | Blue Swirl | #ifndef _WIN32
|
134 | 296af7c9 | Blue Swirl | static int io_thread_fd = -1; |
135 | 296af7c9 | Blue Swirl | |
136 | 296af7c9 | Blue Swirl | static void qemu_event_increment(void) |
137 | 296af7c9 | Blue Swirl | { |
138 | 296af7c9 | Blue Swirl | /* Write 8 bytes to be compatible with eventfd. */
|
139 | 296af7c9 | Blue Swirl | static uint64_t val = 1; |
140 | 296af7c9 | Blue Swirl | ssize_t ret; |
141 | 296af7c9 | Blue Swirl | |
142 | 296af7c9 | Blue Swirl | if (io_thread_fd == -1) |
143 | 296af7c9 | Blue Swirl | return;
|
144 | 296af7c9 | Blue Swirl | |
145 | 296af7c9 | Blue Swirl | do {
|
146 | 296af7c9 | Blue Swirl | ret = write(io_thread_fd, &val, sizeof(val));
|
147 | 296af7c9 | Blue Swirl | } while (ret < 0 && errno == EINTR); |
148 | 296af7c9 | Blue Swirl | |
149 | 296af7c9 | Blue Swirl | /* EAGAIN is fine, a read must be pending. */
|
150 | 296af7c9 | Blue Swirl | if (ret < 0 && errno != EAGAIN) { |
151 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu_event_increment: write() filed: %s\n",
|
152 | 296af7c9 | Blue Swirl | strerror(errno)); |
153 | 296af7c9 | Blue Swirl | exit (1);
|
154 | 296af7c9 | Blue Swirl | } |
155 | 296af7c9 | Blue Swirl | } |
156 | 296af7c9 | Blue Swirl | |
/*
 * fd-handler callback: drain all pending wakeup bytes from the notify fd.
 * For an eventfd a single 8-byte read suffices; for a pipe we loop while
 * reads come back full.
 */
static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    char scratch[512];
    ssize_t len;

    /* Drain the notify pipe. For eventfd, only 8 bytes will be read. */
    do {
        len = read(fd, scratch, sizeof(scratch));
    } while ((len == -1 && errno == EINTR) || len == sizeof(scratch));
}
168 | 296af7c9 | Blue Swirl | |
169 | 296af7c9 | Blue Swirl | static int qemu_event_init(void) |
170 | 296af7c9 | Blue Swirl | { |
171 | 296af7c9 | Blue Swirl | int err;
|
172 | 296af7c9 | Blue Swirl | int fds[2]; |
173 | 296af7c9 | Blue Swirl | |
174 | 296af7c9 | Blue Swirl | err = qemu_eventfd(fds); |
175 | 296af7c9 | Blue Swirl | if (err == -1) |
176 | 296af7c9 | Blue Swirl | return -errno;
|
177 | 296af7c9 | Blue Swirl | |
178 | 296af7c9 | Blue Swirl | err = fcntl_setfl(fds[0], O_NONBLOCK);
|
179 | 296af7c9 | Blue Swirl | if (err < 0) |
180 | 296af7c9 | Blue Swirl | goto fail;
|
181 | 296af7c9 | Blue Swirl | |
182 | 296af7c9 | Blue Swirl | err = fcntl_setfl(fds[1], O_NONBLOCK);
|
183 | 296af7c9 | Blue Swirl | if (err < 0) |
184 | 296af7c9 | Blue Swirl | goto fail;
|
185 | 296af7c9 | Blue Swirl | |
186 | 296af7c9 | Blue Swirl | qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL, |
187 | 296af7c9 | Blue Swirl | (void *)(unsigned long)fds[0]); |
188 | 296af7c9 | Blue Swirl | |
189 | 296af7c9 | Blue Swirl | io_thread_fd = fds[1];
|
190 | 296af7c9 | Blue Swirl | return 0; |
191 | 296af7c9 | Blue Swirl | |
192 | 296af7c9 | Blue Swirl | fail:
|
193 | 296af7c9 | Blue Swirl | close(fds[0]);
|
194 | 296af7c9 | Blue Swirl | close(fds[1]);
|
195 | 296af7c9 | Blue Swirl | return err;
|
196 | 296af7c9 | Blue Swirl | } |
197 | 296af7c9 | Blue Swirl | #else
|
198 | 296af7c9 | Blue Swirl | HANDLE qemu_event_handle; |
199 | 296af7c9 | Blue Swirl | |
/* Placeholder wait-object callback: the wakeup itself is the whole point. */
static void dummy_event_handler(void *opaque)
{
}
203 | 296af7c9 | Blue Swirl | |
204 | 296af7c9 | Blue Swirl | static int qemu_event_init(void) |
205 | 296af7c9 | Blue Swirl | { |
206 | 296af7c9 | Blue Swirl | qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL); |
207 | 296af7c9 | Blue Swirl | if (!qemu_event_handle) {
|
208 | 296af7c9 | Blue Swirl | fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
|
209 | 296af7c9 | Blue Swirl | return -1; |
210 | 296af7c9 | Blue Swirl | } |
211 | 296af7c9 | Blue Swirl | qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
|
212 | 296af7c9 | Blue Swirl | return 0; |
213 | 296af7c9 | Blue Swirl | } |
214 | 296af7c9 | Blue Swirl | |
215 | 296af7c9 | Blue Swirl | static void qemu_event_increment(void) |
216 | 296af7c9 | Blue Swirl | { |
217 | 296af7c9 | Blue Swirl | if (!SetEvent(qemu_event_handle)) {
|
218 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
|
219 | 296af7c9 | Blue Swirl | GetLastError()); |
220 | 296af7c9 | Blue Swirl | exit (1);
|
221 | 296af7c9 | Blue Swirl | } |
222 | 296af7c9 | Blue Swirl | } |
223 | 296af7c9 | Blue Swirl | #endif
|
224 | 296af7c9 | Blue Swirl | |
225 | 296af7c9 | Blue Swirl | #ifndef CONFIG_IOTHREAD
|
/* Non-iothread build: main-loop setup is just the wakeup event. */
int qemu_init_main_loop(void)
{
    return qemu_event_init();
}
230 | 296af7c9 | Blue Swirl | |
231 | 296af7c9 | Blue Swirl | void qemu_init_vcpu(void *_env) |
232 | 296af7c9 | Blue Swirl | { |
233 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
234 | 296af7c9 | Blue Swirl | |
235 | 296af7c9 | Blue Swirl | env->nr_cores = smp_cores; |
236 | 296af7c9 | Blue Swirl | env->nr_threads = smp_threads; |
237 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
238 | 296af7c9 | Blue Swirl | kvm_init_vcpu(env); |
239 | 296af7c9 | Blue Swirl | return;
|
240 | 296af7c9 | Blue Swirl | } |
241 | 296af7c9 | Blue Swirl | |
/* Non-iothread build: everything runs on one thread, so always "self". */
int qemu_cpu_self(void *env)
{
    return 1;
}
246 | 296af7c9 | Blue Swirl | |
/* Non-iothread build: no vCPU threads exist, so resuming is a no-op. */
void resume_all_vcpus(void)
{
}
250 | 296af7c9 | Blue Swirl | |
/* Non-iothread build: no vCPU threads exist, so pausing is a no-op. */
void pause_all_vcpus(void)
{
}
254 | 296af7c9 | Blue Swirl | |
/* Non-iothread build: there is no separate vCPU thread to kick. */
void qemu_cpu_kick(void *env)
{
}
259 | 296af7c9 | Blue Swirl | |
260 | 296af7c9 | Blue Swirl | void qemu_notify_event(void) |
261 | 296af7c9 | Blue Swirl | { |
262 | 296af7c9 | Blue Swirl | CPUState *env = cpu_single_env; |
263 | 296af7c9 | Blue Swirl | |
264 | 296af7c9 | Blue Swirl | qemu_event_increment (); |
265 | 296af7c9 | Blue Swirl | if (env) {
|
266 | 296af7c9 | Blue Swirl | cpu_exit(env); |
267 | 296af7c9 | Blue Swirl | } |
268 | 296af7c9 | Blue Swirl | if (next_cpu && env != next_cpu) {
|
269 | 296af7c9 | Blue Swirl | cpu_exit(next_cpu); |
270 | 296af7c9 | Blue Swirl | } |
271 | 296af7c9 | Blue Swirl | } |
272 | 296af7c9 | Blue Swirl | |
/* Non-iothread build: single-threaded, so the iothread lock is a no-op. */
void qemu_mutex_lock_iothread(void)
{
}

void qemu_mutex_unlock_iothread(void)
{
}
275 | 296af7c9 | Blue Swirl | |
/* Non-iothread build: stop requests can be honoured synchronously. */
void vm_stop(int reason)
{
    do_vm_stop(reason);
}
280 | 296af7c9 | Blue Swirl | |
281 | 296af7c9 | Blue Swirl | #else /* CONFIG_IOTHREAD */ |
282 | 296af7c9 | Blue Swirl | |
283 | 296af7c9 | Blue Swirl | #include "qemu-thread.h" |
284 | 296af7c9 | Blue Swirl | |
285 | 296af7c9 | Blue Swirl | QemuMutex qemu_global_mutex; |
286 | 296af7c9 | Blue Swirl | static QemuMutex qemu_fair_mutex;
|
287 | 296af7c9 | Blue Swirl | |
288 | 296af7c9 | Blue Swirl | static QemuThread io_thread;
|
289 | 296af7c9 | Blue Swirl | |
290 | 296af7c9 | Blue Swirl | static QemuThread *tcg_cpu_thread;
|
291 | 296af7c9 | Blue Swirl | static QemuCond *tcg_halt_cond;
|
292 | 296af7c9 | Blue Swirl | |
293 | 296af7c9 | Blue Swirl | static int qemu_system_ready; |
294 | 296af7c9 | Blue Swirl | /* cpu creation */
|
295 | 296af7c9 | Blue Swirl | static QemuCond qemu_cpu_cond;
|
296 | 296af7c9 | Blue Swirl | /* system init */
|
297 | 296af7c9 | Blue Swirl | static QemuCond qemu_system_cond;
|
298 | 296af7c9 | Blue Swirl | static QemuCond qemu_pause_cond;
|
299 | 296af7c9 | Blue Swirl | |
300 | 296af7c9 | Blue Swirl | static void tcg_block_io_signals(void); |
301 | 296af7c9 | Blue Swirl | static void kvm_block_io_signals(CPUState *env); |
302 | 296af7c9 | Blue Swirl | static void unblock_io_signals(void); |
303 | 296af7c9 | Blue Swirl | |
304 | 296af7c9 | Blue Swirl | int qemu_init_main_loop(void) |
305 | 296af7c9 | Blue Swirl | { |
306 | 296af7c9 | Blue Swirl | int ret;
|
307 | 296af7c9 | Blue Swirl | |
308 | 296af7c9 | Blue Swirl | ret = qemu_event_init(); |
309 | 296af7c9 | Blue Swirl | if (ret)
|
310 | 296af7c9 | Blue Swirl | return ret;
|
311 | 296af7c9 | Blue Swirl | |
312 | 296af7c9 | Blue Swirl | qemu_cond_init(&qemu_pause_cond); |
313 | 296af7c9 | Blue Swirl | qemu_mutex_init(&qemu_fair_mutex); |
314 | 296af7c9 | Blue Swirl | qemu_mutex_init(&qemu_global_mutex); |
315 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
316 | 296af7c9 | Blue Swirl | |
317 | 296af7c9 | Blue Swirl | unblock_io_signals(); |
318 | 296af7c9 | Blue Swirl | qemu_thread_self(&io_thread); |
319 | 296af7c9 | Blue Swirl | |
320 | 296af7c9 | Blue Swirl | return 0; |
321 | 296af7c9 | Blue Swirl | } |
322 | 296af7c9 | Blue Swirl | |
323 | 296af7c9 | Blue Swirl | static void qemu_wait_io_event_common(CPUState *env) |
324 | 296af7c9 | Blue Swirl | { |
325 | 296af7c9 | Blue Swirl | if (env->stop) {
|
326 | 296af7c9 | Blue Swirl | env->stop = 0;
|
327 | 296af7c9 | Blue Swirl | env->stopped = 1;
|
328 | 296af7c9 | Blue Swirl | qemu_cond_signal(&qemu_pause_cond); |
329 | 296af7c9 | Blue Swirl | } |
330 | 296af7c9 | Blue Swirl | } |
331 | 296af7c9 | Blue Swirl | |
332 | 296af7c9 | Blue Swirl | static void qemu_wait_io_event(CPUState *env) |
333 | 296af7c9 | Blue Swirl | { |
334 | 296af7c9 | Blue Swirl | while (!tcg_has_work())
|
335 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
|
336 | 296af7c9 | Blue Swirl | |
337 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_global_mutex); |
338 | 296af7c9 | Blue Swirl | |
339 | 296af7c9 | Blue Swirl | /*
|
340 | 296af7c9 | Blue Swirl | * Users of qemu_global_mutex can be starved, having no chance
|
341 | 296af7c9 | Blue Swirl | * to acquire it since this path will get to it first.
|
342 | 296af7c9 | Blue Swirl | * So use another lock to provide fairness.
|
343 | 296af7c9 | Blue Swirl | */
|
344 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_fair_mutex); |
345 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_fair_mutex); |
346 | 296af7c9 | Blue Swirl | |
347 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
348 | 296af7c9 | Blue Swirl | qemu_wait_io_event_common(env); |
349 | 296af7c9 | Blue Swirl | } |
350 | 296af7c9 | Blue Swirl | |
351 | 296af7c9 | Blue Swirl | static void qemu_kvm_eat_signal(CPUState *env, int timeout) |
352 | 296af7c9 | Blue Swirl | { |
353 | 296af7c9 | Blue Swirl | struct timespec ts;
|
354 | 296af7c9 | Blue Swirl | int r, e;
|
355 | 296af7c9 | Blue Swirl | siginfo_t siginfo; |
356 | 296af7c9 | Blue Swirl | sigset_t waitset; |
357 | 296af7c9 | Blue Swirl | |
358 | 296af7c9 | Blue Swirl | ts.tv_sec = timeout / 1000;
|
359 | 296af7c9 | Blue Swirl | ts.tv_nsec = (timeout % 1000) * 1000000; |
360 | 296af7c9 | Blue Swirl | |
361 | 296af7c9 | Blue Swirl | sigemptyset(&waitset); |
362 | 296af7c9 | Blue Swirl | sigaddset(&waitset, SIG_IPI); |
363 | 296af7c9 | Blue Swirl | |
364 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_global_mutex); |
365 | 296af7c9 | Blue Swirl | r = sigtimedwait(&waitset, &siginfo, &ts); |
366 | 296af7c9 | Blue Swirl | e = errno; |
367 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
368 | 296af7c9 | Blue Swirl | |
369 | 296af7c9 | Blue Swirl | if (r == -1 && !(e == EAGAIN || e == EINTR)) { |
370 | 296af7c9 | Blue Swirl | fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
|
371 | 296af7c9 | Blue Swirl | exit(1);
|
372 | 296af7c9 | Blue Swirl | } |
373 | 296af7c9 | Blue Swirl | } |
374 | 296af7c9 | Blue Swirl | |
375 | 296af7c9 | Blue Swirl | static void qemu_kvm_wait_io_event(CPUState *env) |
376 | 296af7c9 | Blue Swirl | { |
377 | 296af7c9 | Blue Swirl | while (!cpu_has_work(env))
|
378 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
|
379 | 296af7c9 | Blue Swirl | |
380 | 296af7c9 | Blue Swirl | qemu_kvm_eat_signal(env, 0);
|
381 | 296af7c9 | Blue Swirl | qemu_wait_io_event_common(env); |
382 | 296af7c9 | Blue Swirl | } |
383 | 296af7c9 | Blue Swirl | |
384 | 296af7c9 | Blue Swirl | static int qemu_cpu_exec(CPUState *env); |
385 | 296af7c9 | Blue Swirl | |
386 | 296af7c9 | Blue Swirl | static void *kvm_cpu_thread_fn(void *arg) |
387 | 296af7c9 | Blue Swirl | { |
388 | 296af7c9 | Blue Swirl | CPUState *env = arg; |
389 | 296af7c9 | Blue Swirl | |
390 | 296af7c9 | Blue Swirl | qemu_thread_self(env->thread); |
391 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
392 | 296af7c9 | Blue Swirl | kvm_init_vcpu(env); |
393 | 296af7c9 | Blue Swirl | |
394 | 296af7c9 | Blue Swirl | kvm_block_io_signals(env); |
395 | 296af7c9 | Blue Swirl | |
396 | 296af7c9 | Blue Swirl | /* signal CPU creation */
|
397 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
398 | 296af7c9 | Blue Swirl | env->created = 1;
|
399 | 296af7c9 | Blue Swirl | qemu_cond_signal(&qemu_cpu_cond); |
400 | 296af7c9 | Blue Swirl | |
401 | 296af7c9 | Blue Swirl | /* and wait for machine initialization */
|
402 | 296af7c9 | Blue Swirl | while (!qemu_system_ready)
|
403 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
|
404 | 296af7c9 | Blue Swirl | |
405 | 296af7c9 | Blue Swirl | while (1) { |
406 | 296af7c9 | Blue Swirl | if (cpu_can_run(env))
|
407 | 296af7c9 | Blue Swirl | qemu_cpu_exec(env); |
408 | 296af7c9 | Blue Swirl | qemu_kvm_wait_io_event(env); |
409 | 296af7c9 | Blue Swirl | } |
410 | 296af7c9 | Blue Swirl | |
411 | 296af7c9 | Blue Swirl | return NULL; |
412 | 296af7c9 | Blue Swirl | } |
413 | 296af7c9 | Blue Swirl | |
414 | 296af7c9 | Blue Swirl | static void *tcg_cpu_thread_fn(void *arg) |
415 | 296af7c9 | Blue Swirl | { |
416 | 296af7c9 | Blue Swirl | CPUState *env = arg; |
417 | 296af7c9 | Blue Swirl | |
418 | 296af7c9 | Blue Swirl | tcg_block_io_signals(); |
419 | 296af7c9 | Blue Swirl | qemu_thread_self(env->thread); |
420 | 296af7c9 | Blue Swirl | |
421 | 296af7c9 | Blue Swirl | /* signal CPU creation */
|
422 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
423 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) |
424 | 296af7c9 | Blue Swirl | env->created = 1;
|
425 | 296af7c9 | Blue Swirl | qemu_cond_signal(&qemu_cpu_cond); |
426 | 296af7c9 | Blue Swirl | |
427 | 296af7c9 | Blue Swirl | /* and wait for machine initialization */
|
428 | 296af7c9 | Blue Swirl | while (!qemu_system_ready)
|
429 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
|
430 | 296af7c9 | Blue Swirl | |
431 | 296af7c9 | Blue Swirl | while (1) { |
432 | 296af7c9 | Blue Swirl | tcg_cpu_exec(); |
433 | 296af7c9 | Blue Swirl | qemu_wait_io_event(cur_cpu); |
434 | 296af7c9 | Blue Swirl | } |
435 | 296af7c9 | Blue Swirl | |
436 | 296af7c9 | Blue Swirl | return NULL; |
437 | 296af7c9 | Blue Swirl | } |
438 | 296af7c9 | Blue Swirl | |
439 | 296af7c9 | Blue Swirl | void qemu_cpu_kick(void *_env) |
440 | 296af7c9 | Blue Swirl | { |
441 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
442 | 296af7c9 | Blue Swirl | qemu_cond_broadcast(env->halt_cond); |
443 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
444 | 296af7c9 | Blue Swirl | qemu_thread_signal(env->thread, SIG_IPI); |
445 | 296af7c9 | Blue Swirl | } |
446 | 296af7c9 | Blue Swirl | |
447 | 296af7c9 | Blue Swirl | int qemu_cpu_self(void *_env) |
448 | 296af7c9 | Blue Swirl | { |
449 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
450 | 296af7c9 | Blue Swirl | QemuThread this; |
451 | 296af7c9 | Blue Swirl | |
452 | 296af7c9 | Blue Swirl | qemu_thread_self(&this); |
453 | 296af7c9 | Blue Swirl | |
454 | 296af7c9 | Blue Swirl | return qemu_thread_equal(&this, env->thread);
|
455 | 296af7c9 | Blue Swirl | } |
456 | 296af7c9 | Blue Swirl | |
457 | 296af7c9 | Blue Swirl | static void cpu_signal(int sig) |
458 | 296af7c9 | Blue Swirl | { |
459 | 296af7c9 | Blue Swirl | if (cpu_single_env)
|
460 | 296af7c9 | Blue Swirl | cpu_exit(cpu_single_env); |
461 | 296af7c9 | Blue Swirl | } |
462 | 296af7c9 | Blue Swirl | |
463 | 296af7c9 | Blue Swirl | static void tcg_block_io_signals(void) |
464 | 296af7c9 | Blue Swirl | { |
465 | 296af7c9 | Blue Swirl | sigset_t set; |
466 | 296af7c9 | Blue Swirl | struct sigaction sigact;
|
467 | 296af7c9 | Blue Swirl | |
468 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
469 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGUSR2); |
470 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGIO); |
471 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGALRM); |
472 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGCHLD); |
473 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_BLOCK, &set, NULL);
|
474 | 296af7c9 | Blue Swirl | |
475 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
476 | 296af7c9 | Blue Swirl | sigaddset(&set, SIG_IPI); |
477 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_UNBLOCK, &set, NULL);
|
478 | 296af7c9 | Blue Swirl | |
479 | 296af7c9 | Blue Swirl | memset(&sigact, 0, sizeof(sigact)); |
480 | 296af7c9 | Blue Swirl | sigact.sa_handler = cpu_signal; |
481 | 296af7c9 | Blue Swirl | sigaction(SIG_IPI, &sigact, NULL);
|
482 | 296af7c9 | Blue Swirl | } |
483 | 296af7c9 | Blue Swirl | |
/* No-op handler: SIG_IPI is consumed via sigtimedwait()/KVM, not here. */
static void dummy_signal(int sig)
{
}
487 | 296af7c9 | Blue Swirl | |
488 | 296af7c9 | Blue Swirl | static void kvm_block_io_signals(CPUState *env) |
489 | 296af7c9 | Blue Swirl | { |
490 | 296af7c9 | Blue Swirl | int r;
|
491 | 296af7c9 | Blue Swirl | sigset_t set; |
492 | 296af7c9 | Blue Swirl | struct sigaction sigact;
|
493 | 296af7c9 | Blue Swirl | |
494 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
495 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGUSR2); |
496 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGIO); |
497 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGALRM); |
498 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGCHLD); |
499 | 296af7c9 | Blue Swirl | sigaddset(&set, SIG_IPI); |
500 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_BLOCK, &set, NULL);
|
501 | 296af7c9 | Blue Swirl | |
502 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_BLOCK, NULL, &set);
|
503 | 296af7c9 | Blue Swirl | sigdelset(&set, SIG_IPI); |
504 | 296af7c9 | Blue Swirl | |
505 | 296af7c9 | Blue Swirl | memset(&sigact, 0, sizeof(sigact)); |
506 | 296af7c9 | Blue Swirl | sigact.sa_handler = dummy_signal; |
507 | 296af7c9 | Blue Swirl | sigaction(SIG_IPI, &sigact, NULL);
|
508 | 296af7c9 | Blue Swirl | |
509 | 296af7c9 | Blue Swirl | r = kvm_set_signal_mask(env, &set); |
510 | 296af7c9 | Blue Swirl | if (r) {
|
511 | 296af7c9 | Blue Swirl | fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
|
512 | 296af7c9 | Blue Swirl | exit(1);
|
513 | 296af7c9 | Blue Swirl | } |
514 | 296af7c9 | Blue Swirl | } |
515 | 296af7c9 | Blue Swirl | |
516 | 296af7c9 | Blue Swirl | static void unblock_io_signals(void) |
517 | 296af7c9 | Blue Swirl | { |
518 | 296af7c9 | Blue Swirl | sigset_t set; |
519 | 296af7c9 | Blue Swirl | |
520 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
521 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGUSR2); |
522 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGIO); |
523 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGALRM); |
524 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_UNBLOCK, &set, NULL);
|
525 | 296af7c9 | Blue Swirl | |
526 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
527 | 296af7c9 | Blue Swirl | sigaddset(&set, SIG_IPI); |
528 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_BLOCK, &set, NULL);
|
529 | 296af7c9 | Blue Swirl | } |
530 | 296af7c9 | Blue Swirl | |
531 | 296af7c9 | Blue Swirl | static void qemu_signal_lock(unsigned int msecs) |
532 | 296af7c9 | Blue Swirl | { |
533 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_fair_mutex); |
534 | 296af7c9 | Blue Swirl | |
535 | 296af7c9 | Blue Swirl | while (qemu_mutex_trylock(&qemu_global_mutex)) {
|
536 | 296af7c9 | Blue Swirl | qemu_thread_signal(tcg_cpu_thread, SIG_IPI); |
537 | 296af7c9 | Blue Swirl | if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
|
538 | 296af7c9 | Blue Swirl | break;
|
539 | 296af7c9 | Blue Swirl | } |
540 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_fair_mutex); |
541 | 296af7c9 | Blue Swirl | } |
542 | 296af7c9 | Blue Swirl | |
543 | 296af7c9 | Blue Swirl | void qemu_mutex_lock_iothread(void) |
544 | 296af7c9 | Blue Swirl | { |
545 | 296af7c9 | Blue Swirl | if (kvm_enabled()) {
|
546 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_fair_mutex); |
547 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
548 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_fair_mutex); |
549 | 296af7c9 | Blue Swirl | } else
|
550 | 296af7c9 | Blue Swirl | qemu_signal_lock(100);
|
551 | 296af7c9 | Blue Swirl | } |
552 | 296af7c9 | Blue Swirl | |
553 | 296af7c9 | Blue Swirl | void qemu_mutex_unlock_iothread(void) |
554 | 296af7c9 | Blue Swirl | { |
555 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_global_mutex); |
556 | 296af7c9 | Blue Swirl | } |
557 | 296af7c9 | Blue Swirl | |
558 | 296af7c9 | Blue Swirl | static int all_vcpus_paused(void) |
559 | 296af7c9 | Blue Swirl | { |
560 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
561 | 296af7c9 | Blue Swirl | |
562 | 296af7c9 | Blue Swirl | while (penv) {
|
563 | 296af7c9 | Blue Swirl | if (!penv->stopped)
|
564 | 296af7c9 | Blue Swirl | return 0; |
565 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
566 | 296af7c9 | Blue Swirl | } |
567 | 296af7c9 | Blue Swirl | |
568 | 296af7c9 | Blue Swirl | return 1; |
569 | 296af7c9 | Blue Swirl | } |
570 | 296af7c9 | Blue Swirl | |
571 | 296af7c9 | Blue Swirl | void pause_all_vcpus(void) |
572 | 296af7c9 | Blue Swirl | { |
573 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
574 | 296af7c9 | Blue Swirl | |
575 | 296af7c9 | Blue Swirl | while (penv) {
|
576 | 296af7c9 | Blue Swirl | penv->stop = 1;
|
577 | 296af7c9 | Blue Swirl | qemu_thread_signal(penv->thread, SIG_IPI); |
578 | 296af7c9 | Blue Swirl | qemu_cpu_kick(penv); |
579 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
580 | 296af7c9 | Blue Swirl | } |
581 | 296af7c9 | Blue Swirl | |
582 | 296af7c9 | Blue Swirl | while (!all_vcpus_paused()) {
|
583 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
|
584 | 296af7c9 | Blue Swirl | penv = first_cpu; |
585 | 296af7c9 | Blue Swirl | while (penv) {
|
586 | 296af7c9 | Blue Swirl | qemu_thread_signal(penv->thread, SIG_IPI); |
587 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
588 | 296af7c9 | Blue Swirl | } |
589 | 296af7c9 | Blue Swirl | } |
590 | 296af7c9 | Blue Swirl | } |
591 | 296af7c9 | Blue Swirl | |
592 | 296af7c9 | Blue Swirl | void resume_all_vcpus(void) |
593 | 296af7c9 | Blue Swirl | { |
594 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
595 | 296af7c9 | Blue Swirl | |
596 | 296af7c9 | Blue Swirl | while (penv) {
|
597 | 296af7c9 | Blue Swirl | penv->stop = 0;
|
598 | 296af7c9 | Blue Swirl | penv->stopped = 0;
|
599 | 296af7c9 | Blue Swirl | qemu_thread_signal(penv->thread, SIG_IPI); |
600 | 296af7c9 | Blue Swirl | qemu_cpu_kick(penv); |
601 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
602 | 296af7c9 | Blue Swirl | } |
603 | 296af7c9 | Blue Swirl | } |
604 | 296af7c9 | Blue Swirl | |
605 | 296af7c9 | Blue Swirl | static void tcg_init_vcpu(void *_env) |
606 | 296af7c9 | Blue Swirl | { |
607 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
608 | 296af7c9 | Blue Swirl | /* share a single thread for all cpus with TCG */
|
609 | 296af7c9 | Blue Swirl | if (!tcg_cpu_thread) {
|
610 | 296af7c9 | Blue Swirl | env->thread = qemu_mallocz(sizeof(QemuThread));
|
611 | 296af7c9 | Blue Swirl | env->halt_cond = qemu_mallocz(sizeof(QemuCond));
|
612 | 296af7c9 | Blue Swirl | qemu_cond_init(env->halt_cond); |
613 | 296af7c9 | Blue Swirl | qemu_thread_create(env->thread, tcg_cpu_thread_fn, env); |
614 | 296af7c9 | Blue Swirl | while (env->created == 0) |
615 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
|
616 | 296af7c9 | Blue Swirl | tcg_cpu_thread = env->thread; |
617 | 296af7c9 | Blue Swirl | tcg_halt_cond = env->halt_cond; |
618 | 296af7c9 | Blue Swirl | } else {
|
619 | 296af7c9 | Blue Swirl | env->thread = tcg_cpu_thread; |
620 | 296af7c9 | Blue Swirl | env->halt_cond = tcg_halt_cond; |
621 | 296af7c9 | Blue Swirl | } |
622 | 296af7c9 | Blue Swirl | } |
623 | 296af7c9 | Blue Swirl | |
624 | 296af7c9 | Blue Swirl | static void kvm_start_vcpu(CPUState *env) |
625 | 296af7c9 | Blue Swirl | { |
626 | 296af7c9 | Blue Swirl | env->thread = qemu_mallocz(sizeof(QemuThread));
|
627 | 296af7c9 | Blue Swirl | env->halt_cond = qemu_mallocz(sizeof(QemuCond));
|
628 | 296af7c9 | Blue Swirl | qemu_cond_init(env->halt_cond); |
629 | 296af7c9 | Blue Swirl | qemu_thread_create(env->thread, kvm_cpu_thread_fn, env); |
630 | 296af7c9 | Blue Swirl | while (env->created == 0) |
631 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
|
632 | 296af7c9 | Blue Swirl | } |
633 | 296af7c9 | Blue Swirl | |
634 | 296af7c9 | Blue Swirl | void qemu_init_vcpu(void *_env) |
635 | 296af7c9 | Blue Swirl | { |
636 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
637 | 296af7c9 | Blue Swirl | |
638 | 296af7c9 | Blue Swirl | env->nr_cores = smp_cores; |
639 | 296af7c9 | Blue Swirl | env->nr_threads = smp_threads; |
640 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
641 | 296af7c9 | Blue Swirl | kvm_start_vcpu(env); |
642 | 296af7c9 | Blue Swirl | else
|
643 | 296af7c9 | Blue Swirl | tcg_init_vcpu(env); |
644 | 296af7c9 | Blue Swirl | } |
645 | 296af7c9 | Blue Swirl | |
/* Wake up the main loop so it notices pending work (thin wrapper
 * around the event-counter increment). */
void qemu_notify_event(void)
{
    qemu_event_increment();
}
650 | 296af7c9 | Blue Swirl | |
651 | 296af7c9 | Blue Swirl | static void qemu_system_vmstop_request(int reason) |
652 | 296af7c9 | Blue Swirl | { |
653 | 296af7c9 | Blue Swirl | vmstop_requested = reason; |
654 | 296af7c9 | Blue Swirl | qemu_notify_event(); |
655 | 296af7c9 | Blue Swirl | } |
656 | 296af7c9 | Blue Swirl | |
657 | 296af7c9 | Blue Swirl | void vm_stop(int reason) |
658 | 296af7c9 | Blue Swirl | { |
659 | 296af7c9 | Blue Swirl | QemuThread me; |
660 | 296af7c9 | Blue Swirl | qemu_thread_self(&me); |
661 | 296af7c9 | Blue Swirl | |
662 | 296af7c9 | Blue Swirl | if (!qemu_thread_equal(&me, &io_thread)) {
|
663 | 296af7c9 | Blue Swirl | qemu_system_vmstop_request(reason); |
664 | 296af7c9 | Blue Swirl | /*
|
665 | 296af7c9 | Blue Swirl | * FIXME: should not return to device code in case
|
666 | 296af7c9 | Blue Swirl | * vm_stop() has been requested.
|
667 | 296af7c9 | Blue Swirl | */
|
668 | 296af7c9 | Blue Swirl | if (cpu_single_env) {
|
669 | 296af7c9 | Blue Swirl | cpu_exit(cpu_single_env); |
670 | 296af7c9 | Blue Swirl | cpu_single_env->stop = 1;
|
671 | 296af7c9 | Blue Swirl | } |
672 | 296af7c9 | Blue Swirl | return;
|
673 | 296af7c9 | Blue Swirl | } |
674 | 296af7c9 | Blue Swirl | do_vm_stop(reason); |
675 | 296af7c9 | Blue Swirl | } |
676 | 296af7c9 | Blue Swirl | |
677 | 296af7c9 | Blue Swirl | #endif
|
678 | 296af7c9 | Blue Swirl | |
/* Run guest code on one cpu via cpu_exec(), maintaining the icount
 * (deterministic instruction counter) bookkeeping around the call and,
 * when profiling is enabled, accounting the elapsed host time. */
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        /* Retire any instructions still budgeted from the previous slice
         * before computing a fresh budget. */
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        /* Budget instructions up to the next timer deadline. */
        count = qemu_icount_round (qemu_next_deadline());
        qemu_icount += count;
        /* icount_decr.u16.low is 16-bit: cap the per-slice decrement at
         * 0xffff and keep the remainder in icount_extra. */
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
716 | 296af7c9 | Blue Swirl | |
/* Round-robin one pass over the cpu list, executing each runnable cpu.
 * The global cursor next_cpu persists across calls so a pass interrupted
 * by an alarm or stop request resumes where it left off.
 * Returns whether any cpu still has work pending (tcg_has_work()). */
bool tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        /* Disable vm_clock while single-stepping with NOTIMER set, so
         * timers do not fire between debugger steps. */
        qemu_clock_enable(vm_clock,
                          (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        /* A pending alarm means the main loop needs servicing: bail out
         * and resume from this cpu on the next call. */
        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        else if (env->stop)
            break;

        /* Hand control to gdbstub when the guest hit a debug exception. */
        if (ret == EXCP_DEBUG) {
            gdb_set_stop_cpu(env);
            debug_requested = EXCP_DEBUG;
            break;
        }
    }
    return tcg_has_work();
}
744 | 296af7c9 | Blue Swirl | |
745 | 296af7c9 | Blue Swirl | void set_numa_modes(void) |
746 | 296af7c9 | Blue Swirl | { |
747 | 296af7c9 | Blue Swirl | CPUState *env; |
748 | 296af7c9 | Blue Swirl | int i;
|
749 | 296af7c9 | Blue Swirl | |
750 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) { |
751 | 296af7c9 | Blue Swirl | for (i = 0; i < nb_numa_nodes; i++) { |
752 | 296af7c9 | Blue Swirl | if (node_cpumask[i] & (1 << env->cpu_index)) { |
753 | 296af7c9 | Blue Swirl | env->numa_node = i; |
754 | 296af7c9 | Blue Swirl | } |
755 | 296af7c9 | Blue Swirl | } |
756 | 296af7c9 | Blue Swirl | } |
757 | 296af7c9 | Blue Swirl | } |
758 | 296af7c9 | Blue Swirl | |
759 | 296af7c9 | Blue Swirl | void set_cpu_log(const char *optarg) |
760 | 296af7c9 | Blue Swirl | { |
761 | 296af7c9 | Blue Swirl | int mask;
|
762 | 296af7c9 | Blue Swirl | const CPULogItem *item;
|
763 | 296af7c9 | Blue Swirl | |
764 | 296af7c9 | Blue Swirl | mask = cpu_str_to_log_mask(optarg); |
765 | 296af7c9 | Blue Swirl | if (!mask) {
|
766 | 296af7c9 | Blue Swirl | printf("Log items (comma separated):\n");
|
767 | 296af7c9 | Blue Swirl | for (item = cpu_log_items; item->mask != 0; item++) { |
768 | 296af7c9 | Blue Swirl | printf("%-10s %s\n", item->name, item->help);
|
769 | 296af7c9 | Blue Swirl | } |
770 | 296af7c9 | Blue Swirl | exit(1);
|
771 | 296af7c9 | Blue Swirl | } |
772 | 296af7c9 | Blue Swirl | cpu_set_log(mask); |
773 | 296af7c9 | Blue Swirl | } |
774 | 29e922b6 | Blue Swirl | |
775 | 29e922b6 | Blue Swirl | /* Return the virtual CPU time, based on the instruction counter. */
|
776 | 29e922b6 | Blue Swirl | int64_t cpu_get_icount(void)
|
777 | 29e922b6 | Blue Swirl | { |
778 | 29e922b6 | Blue Swirl | int64_t icount; |
779 | 29e922b6 | Blue Swirl | CPUState *env = cpu_single_env;; |
780 | 29e922b6 | Blue Swirl | |
781 | 29e922b6 | Blue Swirl | icount = qemu_icount; |
782 | 29e922b6 | Blue Swirl | if (env) {
|
783 | 29e922b6 | Blue Swirl | if (!can_do_io(env)) {
|
784 | 29e922b6 | Blue Swirl | fprintf(stderr, "Bad clock read\n");
|
785 | 29e922b6 | Blue Swirl | } |
786 | 29e922b6 | Blue Swirl | icount -= (env->icount_decr.u16.low + env->icount_extra); |
787 | 29e922b6 | Blue Swirl | } |
788 | 29e922b6 | Blue Swirl | return qemu_icount_bias + (icount << icount_time_shift);
|
789 | 29e922b6 | Blue Swirl | } |