/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "exec-all.h"

#include "qemu-thread.h"
#include "cpus.h"
#include "compatfd.h"

#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
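
/* next_cpu (below) is the round-robin pointer of the TCG scheduling loop:
 * cpu_exec_all() resumes from it on each iteration. */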

static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}

static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static int cpu_can_run(CPUState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !vm_running) {
        return 0;
    }
    return 1;
}
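
/* A vcpu thread counts as idle when it has no guest work and no pending
 * stop or queued-work request; this predicate decides whether the thread
 * may block on its halt condition variable. */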

static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !vm_running) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env)) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }

    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
#ifdef CONFIG_IOTHREAD
    env->stopped = 1;
#endif
}
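
/* On Linux, hardware memory errors in guest RAM are reported as SIGBUS.
 * The handler below forwards them to KVM, and the prctl() call opts in to
 * early machine-check kill (PR_MCE_KILL_EARLY), i.e. delivery as soon as
 * corruption is detected rather than on access. */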

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd. */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1) {
        return;
    }
    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending. */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe. For eventfd, only 8 bytes will be read. */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1) {
        return -errno;
    }
    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}
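
/* The pair above is a self-pipe pattern: any thread wakes the main loop by
 * writing to io_thread_fd, and the read side is registered as an fd
 * handler so the wakeup is consumed inside the select() loop.
 * qemu_eventfd() falls back to a plain pipe when eventfd is unavailable,
 * which is why qemu_event_read() may have to drain more than 8 bytes. */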

static void dummy_signal(int sig)
{
}

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long) opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

static int qemu_signalfd_init(sigset_t mask)
{
    int sigfd;

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long) sigfd);

    return 0;
}
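
/* KVM vcpu threads keep SIG_IPI and SIGBUS blocked and consume them
 * synchronously with sigtimedwait(), so that a SIGBUS caused by guest
 * memory can be attributed to the right vcpu via kvm_on_sigbus_vcpu(). */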

static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));

#ifndef CONFIG_IOTHREAD
    if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
        qemu_notify_event();
    }
#endif
}

#else /* _WIN32 */

HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit(1);
    }
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* _WIN32 */
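
/* Without CONFIG_IOTHREAD, QEMU is single-threaded: the main loop and all
 * vcpus run interleaved in one thread, so most of the vcpu-thread
 * machinery below reduces to stubs. */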

#ifndef CONFIG_IOTHREAD
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
#ifndef _WIN32
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    sigdelset(&set, SIGIO);
    sigdelset(&set, SIGALRM);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
#endif
}

#ifndef _WIN32
static sigset_t block_synchronous_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    if (kvm_enabled()) {
        /*
         * We need to process timer signals synchronously to avoid a race
         * between exit_request check and KVM vcpu entry.
         */
        sigaddset(&set, SIGIO);
        sigaddset(&set, SIGALRM);
    }

    return set;
}
#endif

int qemu_init_main_loop(void)
{
#ifndef _WIN32
    sigset_t blocked_signals;
    int ret;

    blocked_signals = block_synchronous_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret) {
        return ret;
    }
#endif

    qemu_init_sigbus();

    return qemu_event_init();
}

void qemu_main_loop_start(void)
{
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;
    int r;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;

    if (kvm_enabled()) {
        r = kvm_init_vcpu(env);
        if (r < 0) {
            fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
            exit(1);
        }
        qemu_kvm_init_cpu_signals(env);
    }
}

int qemu_cpu_is_self(void *env)
{
    return 1;
}

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    raise(SIG_IPI);
#else
    abort();
#endif
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
    exit_request = 1;
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void cpu_stop_current(void)
{
}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}
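
/* With CONFIG_IOTHREAD, every KVM vcpu runs in its own thread, all TCG
 * vcpus share a single thread, and the I/O thread runs the main loop;
 * everything below synchronizes on qemu_global_mutex. */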

#else /* CONFIG_IOTHREAD */

QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

static sigset_t block_io_signals(void)
{
    sigset_t set;

    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    return set;
}

int qemu_init_main_loop(void)
{
    int ret;
    sigset_t blocked_signals;

    qemu_init_sigbus();

    blocked_signals = block_io_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);

    return 0;
}

void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}
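
/* run_on_cpu() executes func on the target vcpu's thread.  The work item
 * lives on the caller's stack; that is safe because the caller sleeps on
 * qemu_work_cond until the target thread has marked the item done. */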

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_wait(&qemu_system_cond, &qemu_global_mutex);
    }

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_debug_exception(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_wait(&qemu_system_cond, &qemu_global_mutex);
    }

    while (1) {
        cpu_exec_all();
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (!env->thread_kicked) {
        qemu_thread_signal(env->thread, SIG_IPI);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_thread_signal(cpu_single_env->thread, SIG_IPI);
        cpu_single_env->thread_kicked = true;
    }
}

int qemu_cpu_is_self(void *_env)
{
    CPUState *env = _env;

    return qemu_thread_is_self(env->thread);
}
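
/* Acquiring the global mutex under TCG may require interrupting the vcpu
 * thread, which holds it for whole execution bursts: we kick it with
 * SIG_IPI and arbitrate through qemu_fair_mutex. */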

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(int reason)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(reason);
}

#endif
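
/* With -icount, guest time advances by instruction count instead of wall
 * clock.  tcg_cpu_exec() grants the vcpu an execution budget that reaches
 * the next timer deadline: up to 0xffff instructions in
 * icount_decr.u16.low, the remainder in icount_extra. */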

static int tcg_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
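
/* Round-robin over all vcpus, resuming where the previous call left off.
 * The loop ends early when an exit is requested, an alarm is pending
 * (non-iothread builds only), or a vcpu hits a debug exception. */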

bool cpu_exec_all(void)
{
    int r;

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

#ifndef CONFIG_IOTHREAD
        if (qemu_alarm_pending()) {
            break;
        }
#endif
        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_debug_exception(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}