cpus.c @ 1009d2ed
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "exec-all.h"

#include "qemu-thread.h"
#include "cpus.h"
#include "compatfd.h"

#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}

static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

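/* env->stop is a request that a vcpu pause itself; env->stopped is set once
 * the vcpu has actually parked (see qemu_wait_io_event_common() below). */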
static int cpu_can_run(CPUState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !vm_running) {
        return 0;
    }
    return 1;
}

static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !vm_running) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env)) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *env)
{
    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
#ifdef CONFIG_IOTHREAD
    env->stopped = 1;
#endif
}

#ifdef CONFIG_IOTHREAD
static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}
#endif

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

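/* Drain pending SIG_IPI and SIGBUS synchronously.  A SIGBUS raised while the
 * vcpu was in the kernel is forwarded to kvm_on_sigbus_vcpu(); anything that
 * cannot be handled is re-raised with the default handler so the process dies
 * with the original signal. */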
static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));

#ifndef CONFIG_IOTHREAD
    if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
        qemu_notify_event();
    }
#endif
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static int io_thread_fd = -1;

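/* Main loop wakeup: qemu_event_increment() pokes the write end of a pipe (or
 * eventfd, when qemu_eventfd() provides one); the read end is registered as an
 * fd handler below and is simply drained to consume the notification. */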
static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1) {
        return;
    }
    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read.  */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1) {
        return -errno;
    }
    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}

static void dummy_signal(int sig)
{
}

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long) opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

#ifdef CONFIG_IOTHREAD
    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
#else
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    if (kvm_enabled()) {
        /*
         * We need to process timer signals synchronously to avoid a race
         * between exit_request check and KVM vcpu entry.
         */
        sigaddset(&set, SIGIO);
        sigaddset(&set, SIGALRM);
    }
#endif

    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long) sigfd);

    return 0;
}

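/* Per-vcpu signal setup for KVM: SIG_IPI and SIGBUS stay blocked in the
 * thread's own mask and are only unblocked, via kvm_set_signal_mask(), while
 * the vcpu is inside KVM_RUN, so a kick cannot be lost between the
 * exit_request check and guest entry. */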
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

#ifdef CONFIG_IOTHREAD
    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
#else
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIGIO);
    sigdelset(&set, SIGALRM);
#endif
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
#ifdef CONFIG_IOTHREAD
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
#endif
}

#else /* _WIN32 */

HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit(1);
    }
}

static int qemu_signal_init(void)
{
    return 0;
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

#ifndef CONFIG_IOTHREAD
int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    qemu_init_sigbus();

    return qemu_event_init();
}

void qemu_main_loop_start(void)
{
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;
    int r;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;

    if (kvm_enabled()) {
        r = kvm_init_vcpu(env);
        if (r < 0) {
            fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
            exit(1);
        }
        qemu_kvm_init_cpu_signals(env);
    } else {
        qemu_tcg_init_cpu_signals();
    }
}

int qemu_cpu_is_self(void *env)
{
    return 1;
}

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    raise(SIG_IPI);
#else
    abort();
#endif
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
    exit_request = 1;
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void cpu_stop_current(void)
{
}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

int qemu_init_main_loop(void)
{
    int ret;

    qemu_init_sigbus();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);

    return 0;
}

void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}

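/* Run func(data) on env's vcpu thread.  The work item lives on this stack
 * frame; the caller blocks on qemu_work_cond (dropping the global mutex while
 * waiting) until the target thread has flushed it in flush_queued_work(). */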
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_wait(&qemu_system_cond, &qemu_global_mutex);
    }

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_wait(&qemu_system_cond, &qemu_global_mutex);
    }

    while (1) {
        cpu_exec_all();
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

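/* Force a vcpu thread out of guest execution: POSIX hosts deliver SIG_IPI with
 * pthread_kill(); Windows has no per-thread signals, so the thread is
 * suspended, cpu_signal() raises exit_request, and it is resumed. */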
static void qemu_cpu_kick_thread(CPUState *env)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(env->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(env)) {
        SuspendThread(env->thread->thread);
        cpu_signal(0);
        ResumeThread(env->thread->thread);
    }
#endif
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (!env->thread_kicked) {
        qemu_cpu_kick_thread(env);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_env);
        cpu_single_env->thread_kicked = true;
    }
#else
    abort();
#endif
}

int qemu_cpu_is_self(void *_env)
{
    CPUState *env = _env;

    return qemu_thread_is_self(env->thread);
}

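/* With TCG the vcpu thread holds qemu_global_mutex almost continuously, so the
 * io thread takes qemu_fair_mutex first and, if a plain trylock fails, kicks
 * the vcpu out of the emulation loop; see qemu_tcg_wait_io_event() above. */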
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

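/* Request that every vcpu stop, then wait on qemu_pause_cond until all of them
 * have acknowledged by setting env->stopped; vcpus are re-kicked on each
 * wakeup in case an earlier kick was consumed before the stop was noticed. */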
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(int reason)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(reason);
}

#endif

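/* Execute guest code under TCG.  With -icount, the instruction budget up to
 * the next timer deadline is loaded into icount_decr/icount_extra before
 * cpu_exec() and any unexecuted remainder is folded back in afterwards. */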
static int tcg_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

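/* Round-robin scheduler for the single TCG thread (and for KVM in the
 * non-iothread build): run each vcpu in turn starting from next_cpu until an
 * exit is requested or a vcpu stops. */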
bool cpu_exec_all(void)
{
    int r;

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

#ifndef CONFIG_IOTHREAD
        if (qemu_alarm_pending()) {
            break;
        }
#endif
        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}