Revision 296af7c9: vl.c

--- a/vl.c
+++ b/vl.c
@@ -160,6 +160,7 @@
 #include "slirp/libslirp.h"
 
 #include "qemu-queue.h"
+#include "cpus.h"
 
 //#define DEBUG_NET
 //#define DEBUG_SLIRP
@@ -249,8 +250,6 @@
 uint64_t node_mem[MAX_NODES];
 uint64_t node_cpumask[MAX_NODES];
 
-static CPUState *cur_cpu;
-static CPUState *next_cpu;
 static QEMUTimer *nographic_timer;
 
 uint8_t qemu_uuid[16];
@@ -314,28 +313,6 @@
 target_phys_addr_t isa_mem_base = 0;
 PicState2 *isa_pic;
 
-/***********************************************************/
-void hw_error(const char *fmt, ...)
-{
-    va_list ap;
-    CPUState *env;
-
-    va_start(ap, fmt);
-    fprintf(stderr, "qemu: hardware error: ");
-    vfprintf(stderr, fmt, ap);
-    fprintf(stderr, "\n");
-    for(env = first_cpu; env != NULL; env = env->next_cpu) {
-        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
-#ifdef TARGET_I386
-        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
-#else
-        cpu_dump_state(env, stderr, fprintf, 0);
-#endif
-    }
-    va_end(ap);
-    abort();
-}
-
 static void set_proc_name(const char *s)
 {
 #if defined(__linux__) && defined(PR_SET_NAME)
@@ -1962,33 +1939,6 @@
     qemu_mod_timer(nographic_timer, interval + qemu_get_clock(rt_clock));
 }
 
-void cpu_synchronize_all_states(void)
-{
-    CPUState *cpu;
-
-    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
-        cpu_synchronize_state(cpu);
-    }
-}
-
-void cpu_synchronize_all_post_reset(void)
-{
-    CPUState *cpu;
-
-    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
-        cpu_synchronize_post_reset(cpu);
-    }
-}
-
-void cpu_synchronize_all_post_init(void)
-{
-    CPUState *cpu;
-
-    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
-        cpu_synchronize_post_init(cpu);
-    }
-}
-
 struct vm_change_state_entry {
     VMChangeStateHandler *cb;
     void *opaque;
@@ -2016,7 +1966,7 @@
     qemu_free (e);
 }
 
-static void vm_state_notify(int running, int reason)
+void vm_state_notify(int running, int reason)
 {
     VMChangeStateEntry *e;
 
@@ -2025,9 +1975,6 @@
     }
 }
 
-static void resume_all_vcpus(void);
-static void pause_all_vcpus(void);
-
 void vm_start(void)
 {
     if (!vm_running) {
@@ -2051,7 +1998,7 @@
 static int reset_requested;
 static int shutdown_requested;
 static int powerdown_requested;
-static int debug_requested;
+int debug_requested;
 static int vmstop_requested;
 
 int qemu_shutdown_requested(void)
@@ -2089,17 +2036,6 @@
     return r;
 }
 
-static void do_vm_stop(int reason)
-{
-    if (vm_running) {
-        cpu_disable_ticks();
-        vm_running = 0;
-        pause_all_vcpus();
-        vm_state_notify(0, reason);
-        monitor_protocol_event(QEVENT_STOP, NULL);
-    }
-}
-
 void qemu_register_reset(QEMUResetHandler *func, void *opaque)
 {
     QEMUResetEntry *re = qemu_mallocz(sizeof(QEMUResetEntry));
@@ -2156,589 +2092,6 @@
     qemu_notify_event();
 }
 
-static int cpu_can_run(CPUState *env)
-{
-    if (env->stop)
-        return 0;
-    if (env->stopped)
-        return 0;
-    if (!vm_running)
-        return 0;
-    return 1;
-}
-
-static int cpu_has_work(CPUState *env)
-{
-    if (env->stop)
-        return 1;
-    if (env->stopped)
-        return 0;
-    if (!env->halted)
-        return 1;
-    if (qemu_cpu_has_work(env))
-        return 1;
-    return 0;
-}
-
-static int tcg_has_work(void)
-{
-    CPUState *env;
-
-    for (env = first_cpu; env != NULL; env = env->next_cpu)
-        if (cpu_has_work(env))
-            return 1;
-    return 0;
-}
-
-#ifndef _WIN32
-static int io_thread_fd = -1;
-
-static void qemu_event_increment(void)
-{
-    /* Write 8 bytes to be compatible with eventfd.  */
-    static uint64_t val = 1;
-    ssize_t ret;
-
-    if (io_thread_fd == -1)
-        return;
-
-    do {
-        ret = write(io_thread_fd, &val, sizeof(val));
-    } while (ret < 0 && errno == EINTR);
-
-    /* EAGAIN is fine, a read must be pending.  */
-    if (ret < 0 && errno != EAGAIN) {
-        fprintf(stderr, "qemu_event_increment: write() filed: %s\n",
-                strerror(errno));
-        exit (1);
-    }
-}
-
-static void qemu_event_read(void *opaque)
-{
-    int fd = (unsigned long)opaque;
-    ssize_t len;
-    char buffer[512];
-
-    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read.  */
-    do {
-        len = read(fd, buffer, sizeof(buffer));
-    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
-}
-
-static int qemu_event_init(void)
-{
-    int err;
-    int fds[2];
-
-    err = qemu_eventfd(fds);
-    if (err == -1)
-        return -errno;
-
-    err = fcntl_setfl(fds[0], O_NONBLOCK);
-    if (err < 0)
-        goto fail;
-
-    err = fcntl_setfl(fds[1], O_NONBLOCK);
-    if (err < 0)
-        goto fail;
-
-    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
-                         (void *)(unsigned long)fds[0]);
-
-    io_thread_fd = fds[1];
-    return 0;
-
-fail:
-    close(fds[0]);
-    close(fds[1]);
-    return err;
-}
-#else
-HANDLE qemu_event_handle;
-
-static void dummy_event_handler(void *opaque)
-{
-}
-
-static int qemu_event_init(void)
-{
-    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
-    if (!qemu_event_handle) {
-        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
-        return -1;
-    }
-    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
-    return 0;
-}
-
-static void qemu_event_increment(void)
-{
-    if (!SetEvent(qemu_event_handle)) {
-        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
-                GetLastError());
-        exit (1);
-    }
-}
-#endif
-
-#ifndef CONFIG_IOTHREAD
-static int qemu_init_main_loop(void)
-{
-    return qemu_event_init();
-}
-
-void qemu_init_vcpu(void *_env)
-{
-    CPUState *env = _env;
-
-    env->nr_cores = smp_cores;
-    env->nr_threads = smp_threads;
-    if (kvm_enabled())
-        kvm_init_vcpu(env);
-    return;
-}
-
-int qemu_cpu_self(void *env)
-{
-    return 1;
-}
-
-static void resume_all_vcpus(void)
-{
-}
-
-static void pause_all_vcpus(void)
-{
-}
-
-void qemu_cpu_kick(void *env)
-{
-    return;
-}
-
-void qemu_notify_event(void)
-{
-    CPUState *env = cpu_single_env;
-
-    qemu_event_increment ();
-    if (env) {
-        cpu_exit(env);
-    }
-    if (next_cpu && env != next_cpu) {
-        cpu_exit(next_cpu);
-    }
-}
-
-void qemu_mutex_lock_iothread(void) {}
-void qemu_mutex_unlock_iothread(void) {}
-
-void vm_stop(int reason)
-{
-    do_vm_stop(reason);
-}
-
-#else /* CONFIG_IOTHREAD */
-
-#include "qemu-thread.h"
-
-QemuMutex qemu_global_mutex;
-static QemuMutex qemu_fair_mutex;
-
-static QemuThread io_thread;
-
-static QemuThread *tcg_cpu_thread;
-static QemuCond *tcg_halt_cond;
-
-static int qemu_system_ready;
-/* cpu creation */
-static QemuCond qemu_cpu_cond;
-/* system init */
-static QemuCond qemu_system_cond;
-static QemuCond qemu_pause_cond;
-
-static void tcg_block_io_signals(void);
-static void kvm_block_io_signals(CPUState *env);
-static void unblock_io_signals(void);
-
-static int qemu_init_main_loop(void)
-{
-    int ret;
-
-    ret = qemu_event_init();
-    if (ret)
-        return ret;
-
-    qemu_cond_init(&qemu_pause_cond);
-    qemu_mutex_init(&qemu_fair_mutex);
-    qemu_mutex_init(&qemu_global_mutex);
-    qemu_mutex_lock(&qemu_global_mutex);
-
-    unblock_io_signals();
-    qemu_thread_self(&io_thread);
-
-    return 0;
-}
-
-static void qemu_wait_io_event_common(CPUState *env)
-{
-    if (env->stop) {
-        env->stop = 0;
-        env->stopped = 1;
-        qemu_cond_signal(&qemu_pause_cond);
-    }
-}
-
-static void qemu_wait_io_event(CPUState *env)
-{
-    while (!tcg_has_work())
-        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
-
-    qemu_mutex_unlock(&qemu_global_mutex);
-
-    /*
-     * Users of qemu_global_mutex can be starved, having no chance
-     * to acquire it since this path will get to it first.
-     * So use another lock to provide fairness.
-     */
-    qemu_mutex_lock(&qemu_fair_mutex);
-    qemu_mutex_unlock(&qemu_fair_mutex);
-
-    qemu_mutex_lock(&qemu_global_mutex);
-    qemu_wait_io_event_common(env);
-}
-
-static void qemu_kvm_eat_signal(CPUState *env, int timeout)
-{
-    struct timespec ts;
-    int r, e;
-    siginfo_t siginfo;
-    sigset_t waitset;
-
-    ts.tv_sec = timeout / 1000;
-    ts.tv_nsec = (timeout % 1000) * 1000000;
-
-    sigemptyset(&waitset);
-    sigaddset(&waitset, SIG_IPI);
-
-    qemu_mutex_unlock(&qemu_global_mutex);
-    r = sigtimedwait(&waitset, &siginfo, &ts);
-    e = errno;
-    qemu_mutex_lock(&qemu_global_mutex);
-
-    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
-        fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
-        exit(1);
-    }
-}
-
-static void qemu_kvm_wait_io_event(CPUState *env)
-{
-    while (!cpu_has_work(env))
-        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
-
-    qemu_kvm_eat_signal(env, 0);
-    qemu_wait_io_event_common(env);
-}
-
-static int qemu_cpu_exec(CPUState *env);
-
-static void *kvm_cpu_thread_fn(void *arg)
-{
-    CPUState *env = arg;
-
-    qemu_thread_self(env->thread);
-    if (kvm_enabled())
-        kvm_init_vcpu(env);
-
-    kvm_block_io_signals(env);
-
-    /* signal CPU creation */
-    qemu_mutex_lock(&qemu_global_mutex);
-    env->created = 1;
-    qemu_cond_signal(&qemu_cpu_cond);
-
-    /* and wait for machine initialization */
-    while (!qemu_system_ready)
-        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
-
-    while (1) {
-        if (cpu_can_run(env))
-            qemu_cpu_exec(env);
-        qemu_kvm_wait_io_event(env);
-    }
-
-    return NULL;
-}
-
-static bool tcg_cpu_exec(void);
-
-static void *tcg_cpu_thread_fn(void *arg)
-{
-    CPUState *env = arg;
-
-    tcg_block_io_signals();
-    qemu_thread_self(env->thread);
-
-    /* signal CPU creation */
-    qemu_mutex_lock(&qemu_global_mutex);
-    for (env = first_cpu; env != NULL; env = env->next_cpu)
-        env->created = 1;
-    qemu_cond_signal(&qemu_cpu_cond);
-
-    /* and wait for machine initialization */
-    while (!qemu_system_ready)
-        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
-
-    while (1) {
-        tcg_cpu_exec();
-        qemu_wait_io_event(cur_cpu);
-    }
-
-    return NULL;
-}
-
-void qemu_cpu_kick(void *_env)
-{
-    CPUState *env = _env;
-    qemu_cond_broadcast(env->halt_cond);
-    if (kvm_enabled())
-        qemu_thread_signal(env->thread, SIG_IPI);
-}
-
-int qemu_cpu_self(void *_env)
-{
-    CPUState *env = _env;
-    QemuThread this;
- 
-    qemu_thread_self(&this);
- 
-    return qemu_thread_equal(&this, env->thread);
-}
-
-static void cpu_signal(int sig)
-{
-    if (cpu_single_env)
-        cpu_exit(cpu_single_env);
-}
-
-static void tcg_block_io_signals(void)
-{
-    sigset_t set;
-    struct sigaction sigact;
-
-    sigemptyset(&set);
-    sigaddset(&set, SIGUSR2);
-    sigaddset(&set, SIGIO);
-    sigaddset(&set, SIGALRM);
-    sigaddset(&set, SIGCHLD);
-    pthread_sigmask(SIG_BLOCK, &set, NULL);
-
-    sigemptyset(&set);
-    sigaddset(&set, SIG_IPI);
-    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-
-    memset(&sigact, 0, sizeof(sigact));
-    sigact.sa_handler = cpu_signal;
-    sigaction(SIG_IPI, &sigact, NULL);
-}
-
-static void dummy_signal(int sig)
-{
-}
-
-static void kvm_block_io_signals(CPUState *env)
-{
-    int r;
-    sigset_t set;
-    struct sigaction sigact;
-
-    sigemptyset(&set);
-    sigaddset(&set, SIGUSR2);
-    sigaddset(&set, SIGIO);
-    sigaddset(&set, SIGALRM);
-    sigaddset(&set, SIGCHLD);
-    sigaddset(&set, SIG_IPI);
-    pthread_sigmask(SIG_BLOCK, &set, NULL);
-
-    pthread_sigmask(SIG_BLOCK, NULL, &set);
-    sigdelset(&set, SIG_IPI);
-
-    memset(&sigact, 0, sizeof(sigact));
-    sigact.sa_handler = dummy_signal;
-    sigaction(SIG_IPI, &sigact, NULL);
-
-    r = kvm_set_signal_mask(env, &set);
-    if (r) {
-        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
-        exit(1);
-    }
-}
-
-static void unblock_io_signals(void)
-{
-    sigset_t set;
-
-    sigemptyset(&set);
-    sigaddset(&set, SIGUSR2);
-    sigaddset(&set, SIGIO);
-    sigaddset(&set, SIGALRM);
-    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-
-    sigemptyset(&set);
-    sigaddset(&set, SIG_IPI);
-    pthread_sigmask(SIG_BLOCK, &set, NULL);
-}
-
-static void qemu_signal_lock(unsigned int msecs)
-{
-    qemu_mutex_lock(&qemu_fair_mutex);
-
-    while (qemu_mutex_trylock(&qemu_global_mutex)) {
-        qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
-        if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
-            break;
-    }
-    qemu_mutex_unlock(&qemu_fair_mutex);
-}
-
-void qemu_mutex_lock_iothread(void)
-{
-    if (kvm_enabled()) {
-        qemu_mutex_lock(&qemu_fair_mutex);
-        qemu_mutex_lock(&qemu_global_mutex);
-        qemu_mutex_unlock(&qemu_fair_mutex);
-    } else
-        qemu_signal_lock(100);
-}
-
-void qemu_mutex_unlock_iothread(void)
-{
-    qemu_mutex_unlock(&qemu_global_mutex);
-}
-
-static int all_vcpus_paused(void)
-{
-    CPUState *penv = first_cpu;
-
-    while (penv) {
-        if (!penv->stopped)
-            return 0;
-        penv = (CPUState *)penv->next_cpu;
-    }
-
-    return 1;
-}
-
-static void pause_all_vcpus(void)
-{
-    CPUState *penv = first_cpu;
-
-    while (penv) {
-        penv->stop = 1;
-        qemu_thread_signal(penv->thread, SIG_IPI);
-        qemu_cpu_kick(penv);
-        penv = (CPUState *)penv->next_cpu;
-    }
-
-    while (!all_vcpus_paused()) {
-        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
-        penv = first_cpu;
-        while (penv) {
-            qemu_thread_signal(penv->thread, SIG_IPI);
-            penv = (CPUState *)penv->next_cpu;
-        }
-    }
-}
-
-static void resume_all_vcpus(void)
-{
-    CPUState *penv = first_cpu;
-
-    while (penv) {
-        penv->stop = 0;
-        penv->stopped = 0;
-        qemu_thread_signal(penv->thread, SIG_IPI);
-        qemu_cpu_kick(penv);
-        penv = (CPUState *)penv->next_cpu;
-    }
-}
-
-static void tcg_init_vcpu(void *_env)
-{
-    CPUState *env = _env;
-    /* share a single thread for all cpus with TCG */
-    if (!tcg_cpu_thread) {
-        env->thread = qemu_mallocz(sizeof(QemuThread));
-        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
-        qemu_cond_init(env->halt_cond);
-        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
-        while (env->created == 0)
-            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
-        tcg_cpu_thread = env->thread;
-        tcg_halt_cond = env->halt_cond;
-    } else {
-        env->thread = tcg_cpu_thread;
-        env->halt_cond = tcg_halt_cond;
-    }
-}
-
-static void kvm_start_vcpu(CPUState *env)
-{
-    env->thread = qemu_mallocz(sizeof(QemuThread));
-    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
-    qemu_cond_init(env->halt_cond);
-    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
-    while (env->created == 0)
-        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
-}
-
-void qemu_init_vcpu(void *_env)
-{
-    CPUState *env = _env;
-
-    env->nr_cores = smp_cores;
-    env->nr_threads = smp_threads;
-    if (kvm_enabled())
-        kvm_start_vcpu(env);
-    else
-        tcg_init_vcpu(env);
-}
-
-void qemu_notify_event(void)
-{
-    qemu_event_increment();
-}
-
-static void qemu_system_vmstop_request(int reason)
-{
-    vmstop_requested = reason;
-    qemu_notify_event();
-}
-
-void vm_stop(int reason)
-{
-    QemuThread me;
-    qemu_thread_self(&me);
-
-    if (!qemu_thread_equal(&me, &io_thread)) {
-        qemu_system_vmstop_request(reason);
-        /*
-         * FIXME: should not return to device code in case
-         * vm_stop() has been requested.
-         */
-        if (cpu_single_env) {
-            cpu_exit(cpu_single_env);
-            cpu_single_env->stop = 1;
-        }
-        return;
-    }
-    do_vm_stop(reason);
-}
-
-#endif
-
-
 #ifdef _WIN32
 static void host_main_loop_wait(int *timeout)
 {
@@ -2865,102 +2218,6 @@
 
 }
 
-static int qemu_cpu_exec(CPUState *env)
-{
-    int ret;
-#ifdef CONFIG_PROFILER
-    int64_t ti;
-#endif
-
-#ifdef CONFIG_PROFILER
-    ti = profile_getclock();
-#endif
-    if (use_icount) {
-        int64_t count;
-        int decr;
-        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
-        env->icount_decr.u16.low = 0;
-        env->icount_extra = 0;
-        count = qemu_icount_round (qemu_next_deadline());
-        qemu_icount += count;
-        decr = (count > 0xffff) ? 0xffff : count;
-        count -= decr;
-        env->icount_decr.u16.low = decr;
-        env->icount_extra = count;
-    }
-    ret = cpu_exec(env);
-#ifdef CONFIG_PROFILER
-    qemu_time += profile_getclock() - ti;
-#endif
-    if (use_icount) {
-        /* Fold pending instructions back into the
-           instruction counter, and clear the interrupt flag.  */
-        qemu_icount -= (env->icount_decr.u16.low
-                        + env->icount_extra);
-        env->icount_decr.u32 = 0;
-        env->icount_extra = 0;
-    }
-    return ret;
-}
-
-static bool tcg_cpu_exec(void)
-{
-    int ret = 0;
-
-    if (next_cpu == NULL)
-        next_cpu = first_cpu;
-    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
-        CPUState *env = cur_cpu = next_cpu;
-
-        qemu_clock_enable(vm_clock,
-                          (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
-
-        if (qemu_alarm_pending())
-            break;
-        if (cpu_can_run(env))
-            ret = qemu_cpu_exec(env);
-        else if (env->stop)
-            break;
-
-        if (ret == EXCP_DEBUG) {
-            gdb_set_stop_cpu(env);
-            debug_requested = EXCP_DEBUG;
-            break;
-        }
-    }
-    return tcg_has_work();
-}
-
-static void set_numa_modes(void)
-{
-    CPUState *env;
-    int i;
-
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        for (i = 0; i < nb_numa_nodes; i++) {
-            if (node_cpumask[i] & (1 << env->cpu_index)) {
-                env->numa_node = i;
-            }
-        }
-    }
-}
-
-static void set_cpu_log(const char *optarg)
-{
-    int mask;
-    const CPULogItem *item;
-
-    mask = cpu_str_to_log_mask(optarg);
-    if (!mask) {
-        printf("Log items (comma separated):\n");
-        for (item = cpu_log_items; item->mask != 0; item++) {
-            printf("%-10s %s\n", item->name, item->help);
-        }
-        exit(1);
-    }
-    cpu_set_log(mask);
-}
-
 static int vm_can_run(void)
 {
     if (powerdown_requested)
