root / cpus.c @ e0efb993
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "exec-all.h"

#include "qemu-thread.h"
#include "cpus.h"
#include "compatfd.h"

#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

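/* Propagate register state between QEMU's CPUState and the accelerator (KVM)
 * for every vCPU: on demand, after a system reset, and after machine init. */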
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}

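/* Common VM stop path: stop the ticks, pause all vCPUs, flush pending AIO
 * and block devices, and notify listeners (vmstate handlers, monitor). */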
static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

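/* cpu_can_run() decides whether a vCPU may enter its execution loop;
 * cpu_thread_is_idle() decides whether its thread may go to sleep. */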
static int cpu_can_run(CPUState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !vm_running) {
        return 0;
    }
    return 1;
}

static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !vm_running) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env)) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }

    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
#ifdef CONFIG_IOTHREAD
    env->stopped = 1;
#endif
}

#ifdef CONFIG_IOTHREAD
static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}
#endif

#ifdef CONFIG_LINUX
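/* SIGBUS handling on Linux: hardware memory errors arrive as SIGBUS and are
 * forwarded to KVM; if KVM cannot consume them, the signal is re-raised with
 * the default action so the process dies as it would without this handler. */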
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
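/* Main loop wakeup on POSIX hosts: qemu_event_increment() writes to the
 * write end of the eventfd (or pipe fallback) created by qemu_event_init(),
 * and qemu_event_read() drains it from the main loop's fd handler. */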
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1) {
        return;
    }
    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (intptr_t)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read.  */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1) {
        return -errno;
    }
    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(intptr_t)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}

static void dummy_signal(int sig)
{
}

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (intptr_t)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

#ifdef CONFIG_IOTHREAD
    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
#else
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    if (kvm_enabled()) {
        /*
         * We need to process timer signals synchronously to avoid a race
         * between exit_request check and KVM vcpu entry.
         */
        sigaddset(&set, SIGIO);
        sigaddset(&set, SIGALRM);
    }
#endif

    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}

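/* Per-vCPU signal setup for KVM: SIG_IPI and SIGBUS stay blocked for the
 * thread itself, but are removed from the mask passed to
 * kvm_set_signal_mask() so they can interrupt the vCPU while it is inside
 * KVM_RUN. */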
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

#ifdef CONFIG_IOTHREAD
    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
#else
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIGIO);
    sigdelset(&set, SIGALRM);
#endif
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
#ifdef CONFIG_IOTHREAD
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
#endif
}

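/* Drain any SIG_IPI and SIGBUS left pending for this vCPU thread, handing a
 * pending SIGBUS to KVM, so the signals do not fire later at an unexpected
 * point. */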
static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));

#ifndef CONFIG_IOTHREAD
    if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
        qemu_notify_event();
    }
#endif
}

#else /* _WIN32 */

HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit(1);
    }
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}

static int qemu_signal_init(void)
{
    return 0;
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

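/* Without CONFIG_IOTHREAD, device emulation and all vCPUs share the main
 * thread, so most of the vCPU control hooks below are trivial or no-ops. */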
#ifndef CONFIG_IOTHREAD
int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    qemu_init_sigbus();

    return qemu_event_init();
}

void qemu_main_loop_start(void)
{
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;
    int r;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;

    if (kvm_enabled()) {
        r = kvm_init_vcpu(env);
        if (r < 0) {
            fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
            exit(1);
        }
        qemu_kvm_init_cpu_signals(env);
    } else {
        qemu_tcg_init_cpu_signals();
    }
}

int qemu_cpu_is_self(void *env)
{
    return 1;
}

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    raise(SIG_IPI);
#else
    abort();
#endif
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment ();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
    exit_request = 1;
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void cpu_stop_current(void)
{
}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

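/* With CONFIG_IOTHREAD, each KVM vCPU gets its own thread (all TCG vCPUs
 * share one), and qemu_global_mutex serializes access to guest and device
 * state between the I/O thread and the vCPU threads. */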
QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

int qemu_init_main_loop(void)
{
    int ret;

    qemu_init_sigbus();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);

    return 0;
}

void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}

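/* Run func(data) on the target vCPU's thread: either directly when already
 * on that thread, or by queuing a work item, kicking the vCPU, and waiting
 * on qemu_work_cond until the item is marked done. */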
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

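/* Per-vCPU thread body for KVM: create the in-kernel vCPU, announce
 * creation, wait for machine init, then loop between kvm_cpu_exec() and
 * sleeping on the halt condition. */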
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_wait(&qemu_system_cond, &qemu_global_mutex);
    }

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_debug_exception(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

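/* Single thread body shared by all TCG vCPUs: it round-robins the CPUs
 * through cpu_exec_all() and sleeps when every vCPU is idle. */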
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_wait(&qemu_system_cond, &qemu_global_mutex);
    }

    while (1) {
        cpu_exec_all();
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *env)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(env->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(env)) {
        SuspendThread(env->thread->thread);
        cpu_signal(0);
        ResumeThread(env->thread->thread);
    }
#endif
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (!env->thread_kicked) {
        qemu_cpu_kick_thread(env);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_env);
        cpu_single_env->thread_kicked = true;
    }
#else
    abort();
#endif
}

int qemu_cpu_is_self(void *_env)
{
    CPUState *env = _env;

    return qemu_thread_is_self(env->thread);
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

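/* Ask every vCPU to stop and keep kicking them until each one has reported
 * itself stopped via qemu_pause_cond. */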
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(int reason)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(reason);
}

#endif

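/* Execute one TCG vCPU.  With -icount, grant the CPU an instruction budget
 * up to the next timer deadline before cpu_exec(), and fold any unused
 * budget back into qemu_icount afterwards. */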
static int tcg_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

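/* Round-robin over all vCPUs starting at next_cpu, running each one that
 * can run until an exit is requested; returns false when every vCPU thread
 * is idle so the caller may wait. */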
bool cpu_exec_all(void)
{
    int r;

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

#ifndef CONFIG_IOTHREAD
        if (qemu_alarm_pending()) {
            break;
        }
#endif
        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_debug_exception(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}