cpus.c @ 8cf71710
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "exec-all.h"

#include "cpus.h"
#include "compatfd.h"

#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
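
/* Editorial note: next_cpu is the round-robin scheduling pointer, i.e. the
 * next CPU that cpu_exec_all() will run.  The non-iothread qemu_notify_event()
 * also kicks it so a pending request is noticed between CPUs. */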
static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

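/* Editorial note: these helpers walk every CPU and synchronise register state
 * with the in-kernel accelerator; with KVM, all_states pulls registers into
 * QEMU before they are inspected, while the post_reset/post_init variants
 * arrange for state to be written back after a system reset or initial setup. */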
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

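/* Editorial note: VM run-state helpers.  do_vm_stop() brings the machine to a
 * halt: it stops the ticks, pauses every vCPU, notifies VM state change
 * handlers, flushes pending AIO and block devices, and raises the QMP STOP
 * event.  cpu_can_run() decides whether a vCPU may enter guest code. */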
int cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}

static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static int cpu_can_run(CPUState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !vm_running) {
        return 0;
    }
    return 1;
}

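/* Editorial note: a vCPU thread may go to sleep only when it has no stop or
 * queued-work request pending and either the VM is stopped or the CPU is
 * halted with no interrupt work; all_cpu_threads_idle() applies this check to
 * every CPU for the single TCG thread that drives them all. */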
static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !vm_running) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env)) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

static void cpu_debug_handler(CPUState *env)
{
    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
}

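/* Editorial note: Linux-only SIGBUS handling.  prctl(PR_MCE_KILL, ...) asks
 * the kernel for early delivery of hardware memory errors as SIGBUS; the
 * handler forwards them to KVM via kvm_on_sigbus(), and anything KVM cannot
 * handle is re-raised with the default action so the process terminates. */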
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */
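
/* Editorial note: main-loop wakeup plumbing.  On POSIX hosts qemu_event_init()
 * sets up an eventfd (or pipe fallback) that qemu_notify_event() writes to,
 * and signals routed through signalfd are dispatched from sigfd_handler().
 * On Windows a kernel event object plays the same role. */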

#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1) {
        return;
    }
    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit (1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read.  */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1) {
        return -errno;
    }
    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}

static void dummy_signal(int sig)
{
}

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long) opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

static int qemu_signalfd_init(sigset_t mask)
{
    int sigfd;

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long) sigfd);

    return 0;
}

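/* Editorial note: synchronously drain any SIG_IPI and SIGBUS that were
 * delivered while signals were blocked, so a pending MCE SIGBUS is acted on
 * before the vCPU re-enters KVM; without the iothread it also notifies the
 * main loop about pending SIGIO/SIGALRM. */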
static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));

#ifndef CONFIG_IOTHREAD
    if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
        qemu_notify_event();
    }
#endif
}

#else /* _WIN32 */

HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit (1);
    }
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* _WIN32 */
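
/* Editorial note: everything below exists in two flavours.  Without
 * CONFIG_IOTHREAD the guest CPUs run in the same thread as the main loop, so
 * most of the vCPU thread machinery degenerates to no-op stubs.  With
 * CONFIG_IOTHREAD each KVM vCPU gets its own thread and all TCG CPUs share
 * one, synchronised through qemu_global_mutex. */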

#ifndef CONFIG_IOTHREAD
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
#ifndef _WIN32
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    sigdelset(&set, SIGIO);
    sigdelset(&set, SIGALRM);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
#endif
}

#ifndef _WIN32
static sigset_t block_synchronous_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    if (kvm_enabled()) {
        /*
         * We need to process timer signals synchronously to avoid a race
         * between exit_request check and KVM vcpu entry.
         */
        sigaddset(&set, SIGIO);
        sigaddset(&set, SIGALRM);
    }

    return set;
}
#endif

int qemu_init_main_loop(void)
{
#ifndef _WIN32
    sigset_t blocked_signals;
    int ret;

    blocked_signals = block_synchronous_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret) {
        return ret;
    }
#endif
    cpu_set_debug_excp_handler(cpu_debug_handler);

    qemu_init_sigbus();

    return qemu_event_init();
}

void qemu_main_loop_start(void)
{
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;
    int r;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;

    if (kvm_enabled()) {
        r = kvm_init_vcpu(env);
        if (r < 0) {
            fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
            exit(1);
        }
        qemu_kvm_init_cpu_signals(env);
    }
}

int qemu_cpu_self(void *env)
{
    return 1;
}

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    raise(SIG_IPI);
#else
    abort();
#endif
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment ();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
    exit_request = 1;
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void cpu_stop_current(void)
{
}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */
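
/* Editorial note: I/O-thread build.  vCPUs run in dedicated threads and
 * qemu_global_mutex protects all device and main-loop state; a vCPU thread
 * holds it whenever it is not executing guest code.  The condition variables
 * below coordinate CPU creation, machine init, pausing and queued work. */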

#include "qemu-thread.h"

QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

static sigset_t block_io_signals(void)
{
    sigset_t set;

    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    return set;
}

int qemu_init_main_loop(void)
{
    int ret;
    sigset_t blocked_signals;

    cpu_set_debug_excp_handler(cpu_debug_handler);

    qemu_init_sigbus();

    blocked_signals = block_io_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_self(&io_thread);

    return 0;
}

void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}

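/* Editorial note: run func(data) on env's vCPU thread.  When called from that
 * thread it just runs inline; otherwise the work item is queued on the target
 * CPU, the CPU is kicked, and the caller sleeps on qemu_work_cond (with the
 * global mutex held) until the item is marked done.  A typical caller looks
 * roughly like this (sketch, not taken from this file):
 *
 *     static void do_reset(void *opaque) { ... }
 *     run_on_cpu(env, do_reset, env);
 */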
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

static int qemu_cpu_exec(CPUState *env);

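/* Editorial note: per-vCPU thread bodies.  The KVM thread creates its vcpu,
 * signals qemu_cpu_cond, waits for machine init, then loops between guest
 * execution and qemu_kvm_wait_io_event().  The single TCG thread does the
 * same but drives every CPU through cpu_exec_all(). */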
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_self(env->thread);

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
    }

    while (1) {
        if (cpu_can_run(env)) {
            qemu_cpu_exec(env);
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
    }

    while (1) {
        cpu_exec_all();
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (!env->thread_kicked) {
        qemu_thread_signal(env->thread, SIG_IPI);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_thread_signal(cpu_single_env->thread, SIG_IPI);
        cpu_single_env->thread_kicked = true;
    }
}

int qemu_cpu_self(void *_env)
{
    CPUState *env = _env;
    QemuThread this;

    qemu_thread_self(&this);

    return qemu_thread_equal(&this, env->thread);
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

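/* Editorial note: pausing.  pause_all_vcpus() sets stop on every CPU, kicks
 * them, and waits on qemu_pause_cond until each thread has acknowledged by
 * setting stopped in qemu_wait_io_event_common(); resume_all_vcpus() clears
 * both flags and kicks the threads awake again. */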
static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
        while (env->created == 0) {
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        }
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
    }
}

void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(reason);
}

#endif
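
/* Editorial note: single-CPU execution helper.  With -icount the generated
 * code decrements icount_decr.u16.low as instructions execute, so before
 * entering cpu_exec() the budget up to the next timer deadline is split into
 * a 16-bit low part and an icount_extra remainder; afterwards whatever was
 * not executed is folded back into qemu_icount. */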

static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round (qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

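/* Editorial note: TCG round-robin.  Run each CPU in turn starting at next_cpu,
 * stopping early on a pending alarm, an exit request, a debug exception, or a
 * stop request.  Returns true while at least one CPU is not idle so the caller
 * knows whether to keep iterating. */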
bool cpu_exec_all(void)
{
    int r;

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending()) {
            break;
        }
        if (cpu_can_run(env)) {
            r = qemu_cpu_exec(env);
            if (kvm_enabled()) {
                qemu_kvm_eat_signals(env);
            }
            if (r == EXCP_DEBUG) {
                break;
            }
        } else if (env->stop) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}