Revision ab33fcda

b/cpus.c

     return true;
 }
 
-static bool all_cpu_threads_idle(void)
+bool all_cpu_threads_idle(void)
 {
     CPUState *env;
 
......
     CPUState *env;
 
     while (all_cpu_threads_idle()) {
+        /* Start accounting real time to the virtual clock if the CPUs
+           are idle.  */
+        qemu_clock_warp(vm_clock);
         qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
     }
 
......
 {
     int r;
 
+    /* Account partial waits to the vm_clock.  */
+    qemu_clock_warp(vm_clock);
+
     if (next_cpu == NULL) {
         next_cpu = first_cpu;
     }
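The body of all_cpu_threads_idle() is outside this hunk; only its storage class changes (the patch drops static so qemu_clock_warp() in qemu-timer.c can call it, with the prototype exported via qemu-common.h below). For reference, a minimal sketch of the function as it looks in this era of cpus.c, assuming the per-CPU cpu_thread_is_idle() helper whose tail is visible at the top of the hunk:

bool all_cpu_threads_idle(void)
{
    CPUState *env;

    /* Idle only if every vCPU is idle; a single busy vCPU keeps vm_clock
       advancing through executed instructions as usual.  */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}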

b/qemu-common.h

 void qemu_cpu_kick(void *env);
 void qemu_cpu_kick_self(void);
 int qemu_cpu_is_self(void *env);
+bool all_cpu_threads_idle(void);
 
 /* work queue */
 struct qemu_work_item {

b/qemu-timer.c

 struct QEMUClock {
     int type;
     int enabled;
+
+    QEMUTimer *warp_timer;
 };
 
 struct QEMUTimer {
......
     clock->enabled = enabled;
 }
 
+static int64_t vm_clock_warp_start;
+
+static void icount_warp_rt(void *opaque)
+{
+    if (vm_clock_warp_start == -1) {
+        return;
+    }
+
+    if (vm_running) {
+        int64_t clock = qemu_get_clock_ns(rt_clock);
+        int64_t warp_delta = clock - vm_clock_warp_start;
+        if (use_icount == 1) {
+            qemu_icount_bias += warp_delta;
+        } else {
+            /*
+             * In adaptive mode, do not let the vm_clock run too
+             * far ahead of real time.
+             */
+            int64_t cur_time = cpu_get_clock();
+            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
+            int64_t delta = cur_time - cur_icount;
+            qemu_icount_bias += MIN(warp_delta, delta);
+        }
+        if (qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
+                               qemu_get_clock_ns(vm_clock))) {
+            qemu_notify_event();
+        }
+    }
+    vm_clock_warp_start = -1;
+}
+
+void qemu_clock_warp(QEMUClock *clock)
+{
+    int64_t deadline;
+
+    if (!clock->warp_timer) {
+        return;
+    }
+
+    /*
+     * There are too many global variables to make the "warp" behavior
+     * applicable to other clocks.  But a clock argument removes the
+     * need for if statements all over the place.
+     */
+    assert(clock == vm_clock);
+
+    /*
+     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
+     * ensures that the deadline for the timer is computed correctly below.
+     * This also makes sure that the insn counter is synchronized before the
+     * CPU starts running, in case the CPU is woken by an event other than
+     * the earliest vm_clock timer.
+     */
+    icount_warp_rt(NULL);
+    if (!all_cpu_threads_idle() || !active_timers[clock->type]) {
+        qemu_del_timer(clock->warp_timer);
+        return;
+    }
+
+    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
+    deadline = qemu_next_deadline();
+    if (deadline > 0) {
+        /*
+         * Ensure the vm_clock proceeds even when the virtual CPU goes to
+         * sleep.  Otherwise, the CPU might be waiting for a future timer
+         * interrupt to wake it up, but the interrupt never comes because
+         * the vCPU isn't running any insns and thus doesn't advance the
+         * vm_clock.
+         *
+         * An extreme solution for this problem would be to never let VCPUs
+         * sleep in icount mode if there is a pending vm_clock timer; rather
+         * time could just advance to the next vm_clock event.  Instead, we
+         * do stop VCPUs and only advance vm_clock after some "real" time
+         * (related to the time left until the next event) has passed.  This
+         * rt_clock timer will do this.  This avoids the warps being too
+         * visible externally---for example, you will not be sending network
+         * packets continuously instead of every 100ms.
+         */
+        qemu_mod_timer(clock->warp_timer, vm_clock_warp_start + deadline);
+    } else {
+        qemu_notify_event();
+    }
+}
+
 QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                           QEMUTimerCB *cb, void *opaque)
 {
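The adaptive branch above (use_icount != 1) is easiest to see with numbers. The following standalone program, using made-up values that are not part of the patch, shows how MIN(warp_delta, delta) credits the slept real time to qemu_icount_bias only up to the amount by which vm_clock currently lags real time:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    int64_t warp_delta = 30000000;   /* 30 ms of real time spent idle      */
    int64_t cur_time   = 100000000;  /* hypothetical cpu_get_clock() value */
    int64_t cur_icount = 90000000;   /* hypothetical vm_clock value        */
    int64_t delta = cur_time - cur_icount;   /* vm_clock lags by 10 ms     */

    /* With use_icount == 1 the full 30 ms would be added to the bias;
       in adaptive mode only the 10 ms needed to catch up is added, so
       vm_clock never runs ahead of real time.  */
    printf("bias += %" PRId64 " ns (instead of %" PRId64 " ns)\n",
           MIN(warp_delta, delta), warp_delta);
    return 0;
}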
......
             qemu_rearm_alarm_timer(alarm_timer);
         }
         /* Interrupt execution to force deadline recalculation.  */
-        if (use_icount)
+        qemu_clock_warp(ts->clock);
+        if (use_icount) {
             qemu_notify_event();
+        }
     }
 }
 
......
     if (!option)
         return;
 
+#ifdef CONFIG_IOTHREAD
+    vm_clock->warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
+#endif
+
     if (strcmp(option, "auto") != 0) {
         icount_time_shift = strtol(option, NULL, 0);
         use_icount = 1;
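Taken together, the qemu-timer.c changes exist because, under -icount, vm_clock is derived from the number of executed instructions plus qemu_icount_bias, so an idle vCPU can never reach a pending vm_clock deadline on its own. The standalone sketch below (not QEMU code; the model and numbers are simplified and hypothetical) walks through what the warp timer accomplishes once the hunk above has created it:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t insn_time = 5000000;  /* ns of vm_clock earned by executing insns */
    int64_t bias      = 0;        /* stands in for qemu_icount_bias           */
    int64_t deadline  = 2000000;  /* next vm_clock timer fires 2 ms from now  */
    int64_t timer_at  = insn_time + deadline;

    /* All vCPUs go idle: insn_time stops advancing, so vm_clock is stuck
       short of the pending timer.  */
    printf("idle:       vm_clock=%" PRId64 " ns, timer at %" PRId64 " ns\n",
           insn_time + bias, timer_at);

    /* About 2 real milliseconds later the rt_clock warp timer fires and
       icount_warp_rt() credits the slept time to the bias, so the pending
       vm_clock timer finally becomes due.  */
    bias += deadline;
    printf("after warp: vm_clock=%" PRId64 " ns, timer due: %s\n",
           insn_time + bias, insn_time + bias >= timer_at ? "yes" : "no");
    return 0;
}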

b/qemu-timer.h

 
 int64_t qemu_get_clock_ns(QEMUClock *clock);
 void qemu_clock_enable(QEMUClock *clock, int enabled);
+void qemu_clock_warp(QEMUClock *clock);
 
 QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                           QEMUTimerCB *cb, void *opaque);
