cpus.c @ 55274a30
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"

#include "cpus.h"

static CPUState *cur_cpu;
static CPUState *next_cpu;

/***********************************************************/
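/* Report a fatal hardware emulation error: dump the register state of every
   CPU to stderr for debugging and abort.  Does not return. */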
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

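/* Synchronize register state for every vCPU.  With KVM these copy state
   between the kernel and QEMU's CPUState; without KVM the per-CPU calls
   are no-ops. */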
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

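/* Common stop path: freeze the CPU tick counters, mark the VM as stopped,
   pause every vCPU, and notify vm-state handlers and the monitor
   (QEVENT_STOP). */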
static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

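/* A vCPU may enter the guest only if no stop has been requested for it and
   the VM as a whole is still running. */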
static int cpu_can_run(CPUState *env)
{
    if (env->stop)
        return 0;
    if (env->stopped || !vm_running)
        return 0;
    return 1;
}

static int cpu_has_work(CPUState *env)
{
    if (env->stop)
        return 1;
    if (env->stopped || !vm_running)
        return 0;
    if (!env->halted)
        return 1;
    if (qemu_cpu_has_work(env))
        return 1;
    return 0;
}

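/* TCG runs all vCPUs round-robin on a single thread, so that thread has
   work to do as soon as any one of them does. */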
static int tcg_has_work(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        if (cpu_has_work(env))
            return 1;
    return 0;
}

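/*
 * Main loop wakeup mechanism.  On POSIX hosts qemu_eventfd() provides a
 * descriptor pair (an eventfd, or a pipe as fallback); qemu_event_increment()
 * writes to it to wake the main loop, and qemu_event_read() drains it.
 * On Windows an auto-reset event object serves the same purpose.
 */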
#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1)
        return;

    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read.  */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1)
        return -errno;

    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0)
        goto fail;

    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0)
        goto fail;

    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}
#else
HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit(1);
    }
}
#endif

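/*
 * Without CONFIG_IOTHREAD, guest code runs from the main loop thread, so
 * most of the vCPU threading hooks below are stubs and the iothread lock
 * functions do nothing.
 */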
#ifndef CONFIG_IOTHREAD
int qemu_init_main_loop(void)
{
    return qemu_event_init();
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return;
}

int qemu_cpu_self(void *env)
{
    return 1;
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
    return;
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

#include "qemu-thread.h"

QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;

static void tcg_block_io_signals(void);
static void kvm_block_io_signals(CPUState *env);
static void unblock_io_signals(void);

int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
}

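/* Wait, with a periodic timeout, until some vCPU has work again, then
   briefly release the global mutex via the fairness lock so other users of
   qemu_global_mutex get a chance to acquire it. */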
static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work())
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_wait_io_event_common(env);
}

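/* Consume a pending SIG_IPI, if any, with sigtimedwait() so it does not stay
   queued for the vCPU thread.  The global mutex is dropped while waiting. */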
static void qemu_kvm_eat_signal(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    qemu_mutex_unlock(&qemu_global_mutex);
    r = sigtimedwait(&waitset, &siginfo, &ts);
    e = errno;
    qemu_mutex_lock(&qemu_global_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
        exit(1);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (!cpu_has_work(env))
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_kvm_eat_signal(env, 0);
    qemu_wait_io_event_common(env);
}

static int qemu_cpu_exec(CPUState *env);

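/* KVM vCPU thread: each vCPU gets its own host thread, which announces its
   creation, waits for machine initialization to finish, and then loops
   between running the guest and waiting for I/O events. */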
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_thread_self(env->thread);
    if (kvm_enabled())
        kvm_init_vcpu(env);

    kvm_block_io_signals(env);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

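/* TCG vCPU thread: a single host thread executes all vCPUs round-robin via
   tcg_cpu_exec(), sleeping in qemu_wait_io_event() when none has work. */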
static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    tcg_block_io_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        tcg_cpu_exec();
        qemu_wait_io_event(cur_cpu);
    }

    return NULL;
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;
    qemu_cond_broadcast(env->halt_cond);
    if (kvm_enabled())
        qemu_thread_signal(env->thread, SIG_IPI);
}

int qemu_cpu_self(void *_env)
{
    CPUState *env = _env;
    QemuThread this;

    qemu_thread_self(&this);

    return qemu_thread_equal(&this, env->thread);
}

static void cpu_signal(int sig)
{
    if (cpu_single_env)
        cpu_exit(cpu_single_env);
}

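/* Signal routing for the iothread build: vCPU threads block the I/O-related
   signals (those are handled by the I/O thread) and use SIG_IPI to kick a
   vCPU out of guest execution; for KVM the resulting mask is handed to the
   kernel via kvm_set_signal_mask(). */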
static void tcg_block_io_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGCHLD);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);
}

static void dummy_signal(int sig)
{
}

static void kvm_block_io_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGCHLD);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
        exit(1);
    }
}

static void unblock_io_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
}

static void qemu_signal_lock(unsigned int msecs)
{
    qemu_mutex_lock(&qemu_fair_mutex);

    while (qemu_mutex_trylock(&qemu_global_mutex)) {
        qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
        if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
            break;
    }
    qemu_mutex_unlock(&qemu_fair_mutex);
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else
        qemu_signal_lock(100);
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

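/* Ask every vCPU to stop and wait until all of them have acknowledged,
   re-sending SIG_IPI while waiting in case a kick was missed. */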
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_thread_signal(penv->thread, SIG_IPI);
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_thread_signal(penv->thread, SIG_IPI);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_thread_signal(penv->thread, SIG_IPI);
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
        while (env->created == 0)
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    while (env->created == 0)
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_start_vcpu(env);
    else
        tcg_init_vcpu(env);
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}

void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        if (cpu_single_env) {
            cpu_exit(cpu_single_env);
            cpu_single_env->stop = 1;
        }
        return;
    }
    do_vm_stop(reason);
}

#endif

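/* Run one vCPU.  With -icount, the instruction budget for this slice is
   derived from the next timer deadline before entering cpu_exec(), and any
   instructions left unexecuted are folded back into the counter afterwards. */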
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

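/* Round-robin over all vCPUs, starting where the previous call left off.
   Stops early on a pending alarm, a stop request, or a debug exception,
   and returns whether any vCPU still has work to do. */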
bool tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        qemu_clock_enable(vm_clock,
                          (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        else if (env->stop)
            break;

        if (ret == EXCP_DEBUG) {
            gdb_set_stop_cpu(env);
            debug_requested = EXCP_DEBUG;
            break;
        }
    }
    return tcg_has_work();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}