Revision 296af7c9

b/Makefile.target
@@ -161,7 +161,7 @@
 # System emulator target
 ifdef CONFIG_SOFTMMU
 
-obj-y = vl.o monitor.o machine.o gdbstub.o
+obj-y = vl.o cpus.o monitor.o machine.o gdbstub.o
 obj-y += qemu-timer.o
 # virtio has to be here due to weird dependency between PCI and virtio-net.
 # need to fix this properly
b/cpus.c (new file)
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"

#include "cpus.h"

static CPUState *cur_cpu;
static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static int cpu_can_run(CPUState *env)
{
    if (env->stop)
        return 0;
    if (env->stopped)
        return 0;
    if (!vm_running)
        return 0;
    return 1;
}

static int cpu_has_work(CPUState *env)
{
    if (env->stop)
        return 1;
    if (env->stopped)
        return 0;
    if (!env->halted)
        return 1;
    if (qemu_cpu_has_work(env))
        return 1;
    return 0;
}

static int tcg_has_work(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        if (cpu_has_work(env))
            return 1;
    return 0;
}

#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1)
        return;

    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() filed: %s\n",
                strerror(errno));
        exit (1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read.  */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1)
        return -errno;

    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0)
        goto fail;

    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0)
        goto fail;

    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}
#else
HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit (1);
    }
}
#endif

#ifndef CONFIG_IOTHREAD
int qemu_init_main_loop(void)
{
    return qemu_event_init();
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return;
}

int qemu_cpu_self(void *env)
{
    return 1;
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
    return;
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment ();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

#include "qemu-thread.h"

QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;

static void tcg_block_io_signals(void);
static void kvm_block_io_signals(CPUState *env);
static void unblock_io_signals(void);

int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
}

static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work())
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_wait_io_event_common(env);
}

static void qemu_kvm_eat_signal(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    qemu_mutex_unlock(&qemu_global_mutex);
    r = sigtimedwait(&waitset, &siginfo, &ts);
    e = errno;
    qemu_mutex_lock(&qemu_global_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
        exit(1);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (!cpu_has_work(env))
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_kvm_eat_signal(env, 0);
    qemu_wait_io_event_common(env);
}

static int qemu_cpu_exec(CPUState *env);

static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_thread_self(env->thread);
    if (kvm_enabled())
        kvm_init_vcpu(env);

    kvm_block_io_signals(env);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    tcg_block_io_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        tcg_cpu_exec();
        qemu_wait_io_event(cur_cpu);
    }

    return NULL;
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;
    qemu_cond_broadcast(env->halt_cond);
    if (kvm_enabled())
        qemu_thread_signal(env->thread, SIG_IPI);
}

int qemu_cpu_self(void *_env)
{
    CPUState *env = _env;
    QemuThread this;

    qemu_thread_self(&this);

    return qemu_thread_equal(&this, env->thread);
}

static void cpu_signal(int sig)
{
    if (cpu_single_env)
        cpu_exit(cpu_single_env);
}

static void tcg_block_io_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGCHLD);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);
}

static void dummy_signal(int sig)
{
}

static void kvm_block_io_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGCHLD);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
        exit(1);
    }
}

static void unblock_io_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
}

static void qemu_signal_lock(unsigned int msecs)
{
    qemu_mutex_lock(&qemu_fair_mutex);

    while (qemu_mutex_trylock(&qemu_global_mutex)) {
        qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
        if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
            break;
    }
    qemu_mutex_unlock(&qemu_fair_mutex);
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else
        qemu_signal_lock(100);
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_thread_signal(penv->thread, SIG_IPI);
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_thread_signal(penv->thread, SIG_IPI);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_thread_signal(penv->thread, SIG_IPI);
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
        while (env->created == 0)
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    while (env->created == 0)
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_start_vcpu(env);
    else
        tcg_init_vcpu(env);
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}

void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        if (cpu_single_env) {
            cpu_exit(cpu_single_env);
            cpu_single_env->stop = 1;
        }
        return;
    }
    do_vm_stop(reason);
}

#endif

static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round (qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

bool tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        qemu_clock_enable(vm_clock,
                          (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        else if (env->stop)
            break;

        if (ret == EXCP_DEBUG) {
            gdb_set_stop_cpu(env);
            debug_requested = EXCP_DEBUG;
            break;
        }
    }
    return tcg_has_work();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}
b/cpus.h (new file)
#ifndef QEMU_CPUS_H
#define QEMU_CPUS_H

/* cpu-common.c */
int qemu_init_main_loop(void);
void resume_all_vcpus(void);
void pause_all_vcpus(void);

/* vl.c */
extern int smp_cores;
extern int smp_threads;
extern int debug_requested;
void vm_state_notify(int running, int reason);
bool tcg_cpu_exec(void);
void set_numa_modes(void);
void set_cpu_log(const char *optarg);

#endif
b/vl.c
@@ -160,6 +160,7 @@
 #include "slirp/libslirp.h"
 
 #include "qemu-queue.h"
+#include "cpus.h"
 
 //#define DEBUG_NET
 //#define DEBUG_SLIRP
@@ -249,8 +250,6 @@
 uint64_t node_mem[MAX_NODES];
 uint64_t node_cpumask[MAX_NODES];
 
-static CPUState *cur_cpu;
-static CPUState *next_cpu;
 static QEMUTimer *nographic_timer;
 
 uint8_t qemu_uuid[16];
@@ -314,28 +313,6 @@
 target_phys_addr_t isa_mem_base = 0;
 PicState2 *isa_pic;
 
-/***********************************************************/
-void hw_error(const char *fmt, ...)
-{
-    va_list ap;
-    CPUState *env;
-
-    va_start(ap, fmt);
-    fprintf(stderr, "qemu: hardware error: ");
-    vfprintf(stderr, fmt, ap);
-    fprintf(stderr, "\n");
-    for(env = first_cpu; env != NULL; env = env->next_cpu) {
-        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
-#ifdef TARGET_I386
-        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
-#else
-        cpu_dump_state(env, stderr, fprintf, 0);
-#endif
-    }
-    va_end(ap);
-    abort();
-}
-
 static void set_proc_name(const char *s)
 {
 #if defined(__linux__) && defined(PR_SET_NAME)
@@ -1962,33 +1939,6 @@
     qemu_mod_timer(nographic_timer, interval + qemu_get_clock(rt_clock));
 }
 
-void cpu_synchronize_all_states(void)
-{
-    CPUState *cpu;
-
-    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
-        cpu_synchronize_state(cpu);
-    }
-}
-
-void cpu_synchronize_all_post_reset(void)
-{
-    CPUState *cpu;
-
-    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
-        cpu_synchronize_post_reset(cpu);
-    }
-}
-
-void cpu_synchronize_all_post_init(void)
-{
-    CPUState *cpu;
-
-    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
-        cpu_synchronize_post_init(cpu);
-    }
-}
-
 struct vm_change_state_entry {
     VMChangeStateHandler *cb;
     void *opaque;
@@ -2016,7 +1966,7 @@
     qemu_free (e);
 }
 
-static void vm_state_notify(int running, int reason)
+void vm_state_notify(int running, int reason)
 {
     VMChangeStateEntry *e;
 
@@ -2025,9 +1975,6 @@
     }
 }
 
-static void resume_all_vcpus(void);
-static void pause_all_vcpus(void);
-
 void vm_start(void)
 {
     if (!vm_running) {
@@ -2051,7 +1998,7 @@
 static int reset_requested;
 static int shutdown_requested;
 static int powerdown_requested;
-static int debug_requested;
+int debug_requested;
 static int vmstop_requested;
 
 int qemu_shutdown_requested(void)
@@ -2089,17 +2036,6 @@
     return r;
 }
 
-static void do_vm_stop(int reason)
-{
-    if (vm_running) {
-        cpu_disable_ticks();
-        vm_running = 0;
-        pause_all_vcpus();
-        vm_state_notify(0, reason);
-        monitor_protocol_event(QEVENT_STOP, NULL);
-    }
-}
-
 void qemu_register_reset(QEMUResetHandler *func, void *opaque)
 {
     QEMUResetEntry *re = qemu_mallocz(sizeof(QEMUResetEntry));
@@ -2156 +2092 @@ (final hunk; the page's diff view is truncated partway through)
     qemu_notify_event();
 }
 
The remainder of this hunk deletes from vl.c, verbatim, the code now added in cpus.c above: cpu_can_run(), cpu_has_work(), tcg_has_work(), the POSIX eventfd/pipe and Win32 event-notification helpers (io_thread_fd, qemu_event_increment(), qemu_event_read(), qemu_event_init(), qemu_event_handle, dummy_event_handler()), and both the !CONFIG_IOTHREAD and CONFIG_IOTHREAD variants of the main-loop and vCPU code (qemu_init_main_loop(), qemu_init_vcpu(), qemu_cpu_self(), resume_all_vcpus(), pause_all_vcpus(), all_vcpus_paused(), qemu_cpu_kick(), qemu_notify_event(), qemu_mutex_lock/unlock_iothread(), vm_stop(), the io-thread globals and condition variables, qemu_wait_io_event_common(), qemu_wait_io_event(), qemu_kvm_eat_signal(), qemu_kvm_wait_io_event(), kvm_cpu_thread_fn(), tcg_cpu_thread_fn(), cpu_signal(), tcg_block_io_signals(), dummy_signal(), kvm_block_io_signals(), unblock_io_signals(), qemu_signal_lock(), tcg_init_vcpu(), kvm_start_vcpu()). The only differences from the cpus.c copies are linkage and declarations: in vl.c, qemu_init_main_loop(), pause_all_vcpus(), and resume_all_vcpus() were static, and a `static bool tcg_cpu_exec(void);` forward declaration preceded tcg_cpu_thread_fn(); cpus.c instead exports these through cpus.h. The diff view is truncated inside the removed qemu_init_vcpu() (old vl.c line 2701), so the rest of the removals are not shown on this page.
