Revision db1a4972 vl.c
b/vl.c | ||
---|---|---|
59 | 59 |
#ifdef __linux__ |
60 | 60 |
#include <pty.h> |
61 | 61 |
#include <malloc.h> |
62 |
#include <linux/rtc.h> |
|
63 | 62 |
#include <sys/prctl.h> |
64 | 63 |
|
65 |
/* For the benefit of older linux systems which don't supply it, |
|
66 |
we use a local copy of hpet.h. */ |
|
67 |
/* #include <linux/hpet.h> */ |
|
68 |
#include "hpet.h" |
|
69 |
|
|
70 | 64 |
#include <linux/ppdev.h> |
71 | 65 |
#include <linux/parport.h> |
72 | 66 |
#endif |
... | ... | |
101 | 95 |
|
102 | 96 |
#ifdef _WIN32 |
103 | 97 |
#include <windows.h> |
104 |
#include <mmsystem.h> |
|
105 | 98 |
#endif |
106 | 99 |
|
107 | 100 |
#ifdef CONFIG_SDL |
... | ... | |
258 | 251 |
|
259 | 252 |
static CPUState *cur_cpu; |
260 | 253 |
static CPUState *next_cpu; |
261 |
/* Conversion factor from emulated instructions to virtual clock ticks. */ |
|
262 |
static int icount_time_shift; |
|
263 |
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */ |
|
264 |
#define MAX_ICOUNT_SHIFT 10 |
|
265 |
/* Compensate for varying guest execution speed. */ |
|
266 |
static int64_t qemu_icount_bias; |
|
267 |
static QEMUTimer *icount_rt_timer; |
|
268 |
static QEMUTimer *icount_vm_timer; |
|
269 | 254 |
static QEMUTimer *nographic_timer; |
270 | 255 |
|
271 | 256 |
uint8_t qemu_uuid[16]; |
... | ... | |
421 | 406 |
return res.ll; |
422 | 407 |
} |
423 | 408 |
|
424 |
/* Wall-clock (realtime) host time in nanoseconds, via gettimeofday().
   Subject to jumps if the host date is changed. */
static int64_t get_clock_realtime(void)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000000LL + now.tv_usec * 1000LL;
}
|
431 |
|
|
432 |
#ifdef WIN32 |
|
433 |
|
|
434 |
static int64_t clock_freq; |
|
435 |
|
|
436 |
/* Win32: calibrate the high-resolution performance counter once at
   startup; get_clock() scales counts by this frequency.  Aborts if the
   host has no usable performance counter. */
static void init_get_clock(void)
{
    LARGE_INTEGER freq;
    int ret;
    ret = QueryPerformanceFrequency(&freq);
    if (ret == 0) {
        fprintf(stderr, "Could not calibrate ticks\n");
        exit(1);
    }
    clock_freq = freq.QuadPart;
}
|
447 |
|
|
448 |
/* Win32: monotonic host time in nanoseconds, derived from the
   performance counter calibrated by init_get_clock(). */
static int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    /* Convert raw counts to ns without intermediate overflow. */
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}
|
454 |
|
|
455 |
#else |
|
456 |
|
|
457 |
static int use_rt_clock; |
|
458 |
|
|
459 |
/* Probe once at startup whether CLOCK_MONOTONIC works on this host,
   so get_clock() can branch cheaply on use_rt_clock. */
static void init_get_clock(void)
{
    use_rt_clock = 0;
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    {
        struct timespec ts;
        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
            use_rt_clock = 1;
        }
    }
#endif
}
|
472 |
|
|
473 |
/* Monotonic host time in nanoseconds.  Uses CLOCK_MONOTONIC when
   init_get_clock() found it usable, otherwise falls back to
   wall-clock time. */
static int64_t get_clock(void)
{
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
|
489 |
#endif |
|
490 |
|
|
491 |
/* Return the virtual CPU time, based on the instruction counter. */ |
|
492 |
static int64_t cpu_get_icount(void) |
|
493 |
{ |
|
494 |
int64_t icount; |
|
495 |
CPUState *env = cpu_single_env;; |
|
496 |
icount = qemu_icount; |
|
497 |
if (env) { |
|
498 |
if (!can_do_io(env)) |
|
499 |
fprintf(stderr, "Bad clock read\n"); |
|
500 |
icount -= (env->icount_decr.u16.low + env->icount_extra); |
|
501 |
} |
|
502 |
return qemu_icount_bias + (icount << icount_time_shift); |
|
503 |
} |
|
504 |
|
|
505 |
/***********************************************************/ |
|
506 |
/* guest cycle counter */ |
|
507 |
|
|
508 |
/* Guest cycle-counter / clock bookkeeping; serialized by
   vmstate_timers so guest time survives migration. */
typedef struct TimersState {
    int64_t cpu_ticks_prev;    /* last raw tick sample; detects the host
                                  counter going backwards (e.g. suspend) */
    int64_t cpu_ticks_offset;  /* correction added to cpu_get_real_ticks() */
    int64_t cpu_clock_offset;  /* correction added to get_clock() */
    int32_t cpu_ticks_enabled; /* nonzero while ticks/clock are running */
    int64_t dummy;             /* unused; kept because it is part of the
                                  saved vmstate field layout */
} TimersState;
|
515 |
|
|
516 |
TimersState timers_state; |
|
517 |
|
|
518 |
/* return the host CPU cycle counter and handle stop/restart */ |
|
519 |
/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    /* In icount mode the "cycle counter" is the scaled instruction
       counter rather than the host TSC. */
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        /* Clock is stopped: report the frozen value. */
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}
|
538 |
|
|
539 |
/* return the host CPU monotonic timer and handle stop/restart */ |
|
540 |
static int64_t cpu_get_clock(void) |
|
541 |
{ |
|
542 |
int64_t ti; |
|
543 |
if (!timers_state.cpu_ticks_enabled) { |
|
544 |
return timers_state.cpu_clock_offset; |
|
545 |
} else { |
|
546 |
ti = get_clock(); |
|
547 |
return ti + timers_state.cpu_clock_offset; |
|
548 |
} |
|
549 |
} |
|
550 |
|
|
551 |
#ifndef CONFIG_IOTHREAD |
|
552 |
/* How far virtual time is ahead of real time, in ns; used to decide
   how long the main loop may sleep. */
static int64_t qemu_icount_delta(void)
{
    if (!use_icount) {
        /* No icount: allow a long (5 s) sleep; timers wake us anyway. */
        return 5000 * (int64_t) 1000000;
    } else if (use_icount == 1) {
        /* When not using an adaptive execution frequency
           we tend to get badly out of sync with real time,
           so just delay for a reasonable amount of time.  */
        return 0;
    } else {
        return cpu_get_icount() - cpu_get_clock();
    }
}
|
565 |
#endif |
|
566 |
|
|
567 |
/* enable cpu_get_ticks() */ |
|
568 |
void cpu_enable_ticks(void) |
|
569 |
{ |
|
570 |
if (!timers_state.cpu_ticks_enabled) { |
|
571 |
timers_state.cpu_ticks_offset -= cpu_get_real_ticks(); |
|
572 |
timers_state.cpu_clock_offset -= get_clock(); |
|
573 |
timers_state.cpu_ticks_enabled = 1; |
|
574 |
} |
|
575 |
} |
|
576 |
|
|
577 |
/* disable cpu_get_ticks() : the clock is stopped. You must not call |
|
578 |
cpu_get_ticks() after that. */ |
|
579 |
void cpu_disable_ticks(void) |
|
580 |
{ |
|
581 |
if (timers_state.cpu_ticks_enabled) { |
|
582 |
timers_state.cpu_ticks_offset = cpu_get_ticks(); |
|
583 |
timers_state.cpu_clock_offset = cpu_get_clock(); |
|
584 |
timers_state.cpu_ticks_enabled = 0; |
|
585 |
} |
|
586 |
} |
|
587 |
|
|
588 |
/***********************************************************/ |
|
589 |
/* timers */ |
|
590 |
|
|
591 |
#define QEMU_CLOCK_REALTIME 0 |
|
592 |
#define QEMU_CLOCK_VIRTUAL 1 |
|
593 |
#define QEMU_CLOCK_HOST 2 |
|
594 |
|
|
595 |
/* A clock source; one instance per QEMU_CLOCK_* type. */
struct QEMUClock {
    int type;      /* QEMU_CLOCK_REALTIME / _VIRTUAL / _HOST */
    int enabled;   /* when 0, qemu_run_timers() skips this clock */
    /* XXX: add frequency */
};
|
600 |
|
|
601 |
/* A one-shot timer, kept in a singly-linked list sorted by
   expire_time (one list per clock type in active_timers[]). */
struct QEMUTimer {
    QEMUClock *clock;       /* clock this timer expires against */
    int64_t expire_time;    /* absolute deadline in clock units */
    QEMUTimerCB *cb;        /* callback run at expiry */
    void *opaque;           /* argument passed to cb */
    struct QEMUTimer *next; /* next timer in the sorted list */
};
|
608 |
|
|
609 |
/* A host alarm backend (dynticks, HPET, RTC, setitimer, win32...).
   `rearm` is NULL for fixed-period backends. */
struct qemu_alarm_timer {
    char const *name;
    int (*start)(struct qemu_alarm_timer *t);
    void (*stop)(struct qemu_alarm_timer *t);
    void (*rearm)(struct qemu_alarm_timer *t);  /* NULL => periodic */
    void *priv;        /* backend-private data (fd, timer id, ...) */

    char expired;      /* one-shot alarm fired; needs re-arming */
    char pending;      /* alarm fired; timers not yet serviced */
};
|
619 |
|
|
620 |
static struct qemu_alarm_timer *alarm_timer; |
|
621 |
static int qemu_calculate_timeout(void); |
|
622 |
|
|
623 |
static inline int qemu_alarm_pending(void) |
|
624 |
{ |
|
625 |
return alarm_timer->pending; |
|
626 |
} |
|
627 |
|
|
628 |
static inline int alarm_has_dynticks(struct qemu_alarm_timer *t) |
|
629 |
{ |
|
630 |
return !!t->rearm; |
|
631 |
} |
|
632 |
|
|
633 |
static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t) |
|
634 |
{ |
|
635 |
if (!alarm_has_dynticks(t)) |
|
636 |
return; |
|
637 |
|
|
638 |
t->rearm(t); |
|
639 |
} |
|
640 |
|
|
641 |
/* TODO: MIN_TIMER_REARM_US should be optimized */ |
|
642 |
#define MIN_TIMER_REARM_US 250 |
|
643 |
|
|
644 |
#ifdef _WIN32 |
|
645 |
|
|
646 |
struct qemu_alarm_win32 { |
|
647 |
MMRESULT timerId; |
|
648 |
unsigned int period; |
|
649 |
} alarm_win32_data = {0, 0}; |
|
650 |
|
|
651 |
static int win32_start_timer(struct qemu_alarm_timer *t); |
|
652 |
static void win32_stop_timer(struct qemu_alarm_timer *t); |
|
653 |
static void win32_rearm_timer(struct qemu_alarm_timer *t); |
|
654 |
|
|
655 |
#else |
|
656 |
|
|
657 |
static int unix_start_timer(struct qemu_alarm_timer *t); |
|
658 |
static void unix_stop_timer(struct qemu_alarm_timer *t); |
|
659 |
|
|
660 |
#ifdef __linux__ |
|
661 |
|
|
662 |
static int dynticks_start_timer(struct qemu_alarm_timer *t); |
|
663 |
static void dynticks_stop_timer(struct qemu_alarm_timer *t); |
|
664 |
static void dynticks_rearm_timer(struct qemu_alarm_timer *t); |
|
665 |
|
|
666 |
static int hpet_start_timer(struct qemu_alarm_timer *t); |
|
667 |
static void hpet_stop_timer(struct qemu_alarm_timer *t); |
|
668 |
|
|
669 |
static int rtc_start_timer(struct qemu_alarm_timer *t); |
|
670 |
static void rtc_stop_timer(struct qemu_alarm_timer *t); |
|
671 |
|
|
672 |
#endif /* __linux__ */ |
|
673 |
|
|
674 |
#endif /* _WIN32 */ |
|
675 |
|
|
676 |
/* Correlation between real and virtual time is always going to be |
|
677 |
fairly approximate, so ignore small variation. |
|
678 |
When the guest is idle real and virtual time will be aligned in |
|
679 |
the IO wait loop. */ |
|
680 |
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10) |
|
681 |
|
|
682 |
/* Adaptive icount: compare virtual time against real time and nudge
   icount_time_shift so the two track each other. */
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing.  */
    if (!vm_running)
        return;

    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    /* Rebase the bias so the shift change doesn't make time jump. */
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}
|
711 |
|
|
712 |
static void icount_adjust_rt(void * opaque) |
|
713 |
{ |
|
714 |
qemu_mod_timer(icount_rt_timer, |
|
715 |
qemu_get_clock(rt_clock) + 1000); |
|
716 |
icount_adjust(); |
|
717 |
} |
|
718 |
|
|
719 |
static void icount_adjust_vm(void * opaque) |
|
720 |
{ |
|
721 |
qemu_mod_timer(icount_vm_timer, |
|
722 |
qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10); |
|
723 |
icount_adjust(); |
|
724 |
} |
|
725 |
|
|
726 |
static int64_t qemu_icount_round(int64_t count) |
|
727 |
{ |
|
728 |
return (count + (1 << icount_time_shift) - 1) >> icount_time_shift; |
|
729 |
} |
|
730 |
|
|
731 |
/* Host alarm backends in default order of preference; configure_alarms()
   may reorder or disable entries.  NULL name terminates the list. */
static struct qemu_alarm_timer alarm_timers[] = {
#ifndef _WIN32
#ifdef __linux__
    {"dynticks", dynticks_start_timer,
     dynticks_stop_timer, dynticks_rearm_timer, NULL},
    /* HPET - if available - is preferred */
    {"hpet", hpet_start_timer, hpet_stop_timer, NULL, NULL},
    /* ...otherwise try RTC */
    {"rtc", rtc_start_timer, rtc_stop_timer, NULL, NULL},
#endif
    {"unix", unix_start_timer, unix_stop_timer, NULL, NULL},
#else
    {"dynticks", win32_start_timer,
     win32_stop_timer, win32_rearm_timer, &alarm_win32_data},
    {"win32", win32_start_timer,
     win32_stop_timer, NULL, &alarm_win32_data},
#endif
    {NULL, }
};
|
750 |
|
|
751 |
static void show_available_alarms(void) |
|
752 |
{ |
|
753 |
int i; |
|
754 |
|
|
755 |
printf("Available alarm timers, in order of precedence:\n"); |
|
756 |
for (i = 0; alarm_timers[i].name; i++) |
|
757 |
printf("%s\n", alarm_timers[i].name); |
|
758 |
} |
|
759 |
|
|
760 |
/* Parse the -clock option: a comma-separated list of backend names.
   Named backends are moved to the front of alarm_timers[] in the
   order given; all others are disabled.  "?" lists the choices.
   NOTE: uses strtok(), which is fine here because this runs once,
   single-threaded, at startup. */
static void configure_alarms(char const *opt)
{
    int i;
    int cur = 0;
    int count = ARRAY_SIZE(alarm_timers) - 1;  /* exclude NULL sentinel */
    char *arg;
    char *name;
    struct qemu_alarm_timer tmp;

    if (!strcmp(opt, "?")) {
        show_available_alarms();
        exit(0);
    }

    arg = qemu_strdup(opt);

    /* Reorder the array */
    name = strtok(arg, ",");
    while (name) {
        for (i = 0; i < count && alarm_timers[i].name; i++) {
            if (!strcmp(alarm_timers[i].name, name))
                break;
        }

        if (i == count) {
            fprintf(stderr, "Unknown clock %s\n", name);
            goto next;
        }

        if (i < cur)
            /* Ignore */
            goto next;

        /* Swap */
        tmp = alarm_timers[i];
        alarm_timers[i] = alarm_timers[cur];
        alarm_timers[cur] = tmp;

        cur++;
next:
        name = strtok(NULL, ",");
    }

    qemu_free(arg);

    if (cur) {
        /* Disable remaining timers */
        for (i = cur; i < count; i++)
            alarm_timers[i].name = NULL;
    } else {
        /* Nothing valid was selected: show help and bail out. */
        show_available_alarms();
        exit(1);
    }
}
|
814 |
|
|
815 |
#define QEMU_NUM_CLOCKS 3 |
|
816 |
|
|
817 |
QEMUClock *rt_clock; |
|
818 |
QEMUClock *vm_clock; |
|
819 |
QEMUClock *host_clock; |
|
820 |
|
|
821 |
static QEMUTimer *active_timers[QEMU_NUM_CLOCKS]; |
|
822 |
|
|
823 |
static QEMUClock *qemu_new_clock(int type) |
|
824 |
{ |
|
825 |
QEMUClock *clock; |
|
826 |
clock = qemu_mallocz(sizeof(QEMUClock)); |
|
827 |
clock->type = type; |
|
828 |
clock->enabled = 1; |
|
829 |
return clock; |
|
830 |
} |
|
831 |
|
|
832 |
/* Enable or disable a clock; qemu_run_timers() ignores disabled
   clocks, so their timers are effectively paused. */
static void qemu_clock_enable(QEMUClock *clock, int enabled)
{
    clock->enabled = enabled;
}
|
836 |
|
|
837 |
/* Allocate a timer on `clock` that invokes cb(opaque) at expiry.
   The timer is created inactive; arm it with qemu_mod_timer(). */
QEMUTimer *qemu_new_timer(QEMUClock *clock, QEMUTimerCB *cb, void *opaque)
{
    QEMUTimer *timer = qemu_mallocz(sizeof(QEMUTimer));

    timer->clock = clock;
    timer->cb = cb;
    timer->opaque = opaque;
    return timer;
}
|
847 |
|
|
848 |
/* Free a timer.  The caller must ensure it is no longer on an
   active list (qemu_del_timer() it first). */
void qemu_free_timer(QEMUTimer *ts)
{
    qemu_free(ts);
}
|
852 |
|
|
853 |
/* stop a timer, but do not dealloc it */ |
|
854 |
/* stop a timer, but do not dealloc it */
void qemu_del_timer(QEMUTimer *ts)
{
    QEMUTimer **pt, *t;

    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    /* Walk the pointer-to-pointer chain and unlink ts if present;
       a timer not on the list is silently ignored. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!t)
            break;
        if (t == ts) {
            *pt = t->next;
            break;
        }
        pt = &t->next;
    }
}
|
872 |
|
|
873 |
/* modify the current timer so that it will be fired when current_time |
|
874 |
>= expire_time. The corresponding callback will be called. */ |
|
875 |
/* modify the current timer so that it will be fired when current_time
   >= expire_time. The corresponding callback will be called. */
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
{
    QEMUTimer **pt, *t;

    qemu_del_timer(ts);

    /* add the timer in the sorted list */
    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!t)
            break;
        if (t->expire_time > expire_time)
            break;
        pt = &t->next;
    }
    ts->expire_time = expire_time;
    ts->next = *pt;
    *pt = ts;

    /* Rearm if necessary */
    if (pt == &active_timers[ts->clock->type]) {
        /* New head of the list: this is now the nearest deadline, so
           make sure the host alarm fires early enough for it. */
        if (!alarm_timer->pending) {
            qemu_rearm_alarm_timer(alarm_timer);
        }
        /* Interrupt execution to force deadline recalculation.  */
        if (use_icount)
            qemu_notify_event();
    }
}
|
907 |
|
|
908 |
/* Return 1 iff `ts` is currently linked on its clock's active list. */
int qemu_timer_pending(QEMUTimer *ts)
{
    const QEMUTimer *cur;

    for (cur = active_timers[ts->clock->type]; cur; cur = cur->next) {
        if (cur == ts) {
            return 1;
        }
    }
    return 0;
}
|
917 |
|
|
918 |
/* Return 1 iff the head of a (sorted) timer list has reached its
   deadline.  A NULL head means nothing is pending. */
int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time)
{
    return timer_head != NULL && timer_head->expire_time <= current_time;
}
|
924 |
|
|
925 |
/* Run the callbacks of every timer on `clock` whose deadline has
   passed.  The list is sorted, so we stop at the first live timer. */
static void qemu_run_timers(QEMUClock *clock)
{
    QEMUTimer **ptimer_head, *ts;
    int64_t current_time;

    if (!clock->enabled)
        return;

    current_time = qemu_get_clock (clock);
    ptimer_head = &active_timers[clock->type];
    for(;;) {
        ts = *ptimer_head;
        if (!ts || ts->expire_time > current_time)
            break;
        /* remove timer from the list before calling the callback */
        *ptimer_head = ts->next;
        ts->next = NULL;

        /* run the callback (the timer list can be modified) */
        ts->cb(ts->opaque);
    }
}
|
947 |
|
|
948 |
/* Current value of `clock`.  NOTE the units differ: rt_clock is in
   *milliseconds*, vm_clock and host_clock are in nanoseconds.
   Use qemu_get_clock_ns() for uniform nanoseconds. */
int64_t qemu_get_clock(QEMUClock *clock)
{
    switch(clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock() / 1000000;
    default:                    /* unknown types fall through to vm_clock */
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}
|
964 |
|
|
965 |
/* Current value of `clock` in nanoseconds for all clock types. */
int64_t qemu_get_clock_ns(QEMUClock *clock)
{
    switch(clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock();
    default:                    /* unknown types fall through to vm_clock */
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}
|
981 |
|
|
982 |
/* Create the three global clocks.  init_get_clock() must run first so
   get_clock() is calibrated before anyone reads the new clocks. */
static void init_clocks(void)
{
    init_get_clock();
    rt_clock = qemu_new_clock(QEMU_CLOCK_REALTIME);
    vm_clock = qemu_new_clock(QEMU_CLOCK_VIRTUAL);
    host_clock = qemu_new_clock(QEMU_CLOCK_HOST);

    /* By default the emulated RTC follows host wall-clock time. */
    rtc_clock = host_clock;
}
|
991 |
|
|
992 |
/* save a timer */ |
|
993 |
/* save a timer: an inactive timer is written as the sentinel -1,
   otherwise its absolute expiry time. */
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire = qemu_timer_pending(ts) ? ts->expire_time : (uint64_t)-1;

    qemu_put_be64(f, expire);
}
|
1004 |
|
|
1005 |
/* restore a timer saved by qemu_put_timer(): -1 means "not pending". */
void qemu_get_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire = qemu_get_be64(f);

    if (expire == (uint64_t)-1) {
        qemu_del_timer(ts);
    } else {
        qemu_mod_timer(ts, expire);
    }
}
|
1016 |
|
|
1017 |
/* Migration format for timers_state.  Version 2 added
   cpu_clock_offset; `dummy` preserves the version 1 field layout. */
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};
|
1029 |
|
|
1030 |
/* Parse the -icount option.  A number sets a fixed icount_time_shift;
   "auto" enables the adaptive mode (use_icount == 2) driven by the
   two adjustment timers below.  NULL means icount is off. */
static void configure_icount(const char *option)
{
    vmstate_register(0, &vmstate_timers, &timers_state);
    if (!option)
        return;

    if (strcmp(option, "auto") != 0) {
        /* Fixed shift supplied by the user. */
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = qemu_new_timer(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
}
|
1060 |
|
|
1061 |
/* Service a pending host alarm: re-arm one-shot backends, then run
   expired timers on every clock.  Called from the main loop. */
static void qemu_run_all_timers(void)
{
    /* rearm timer, if not periodic */
    if (alarm_timer->expired) {
        alarm_timer->expired = 0;
        qemu_rearm_alarm_timer(alarm_timer);
    }

    alarm_timer->pending = 0;

    /* vm time timers */
    /* vm_clock only advances while the guest runs. */
    if (vm_running) {
        qemu_run_timers(vm_clock);
    }

    qemu_run_timers(rt_clock);
    qemu_run_timers(host_clock);
}
|
1079 |
|
|
1080 |
#ifdef _WIN32
/* Win32 multimedia-timer callback / Unix SIGALRM-SIGIO handler.
   Must be async-signal safe on Unix: it only inspects the timer
   lists, sets flags on the alarm backend, and wakes the main loop. */
static void CALLBACK host_alarm_handler(UINT uTimerID, UINT uMsg,
                                        DWORD_PTR dwUser, DWORD_PTR dw1,
                                        DWORD_PTR dw2)
#else
static void host_alarm_handler(int host_signum)
#endif
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t)
        return;

#if 0
    /* Debug instrumentation: print alarm jitter statistics. */
#define DISP_FREQ 1000
    {
        static int64_t delta_min = INT64_MAX;
        static int64_t delta_max, delta_cum, last_clock, delta, ti;
        static int count;
        ti = qemu_get_clock(vm_clock);
        if (last_clock != 0) {
            delta = ti - last_clock;
            if (delta < delta_min)
                delta_min = delta;
            if (delta > delta_max)
                delta_max = delta;
            delta_cum += delta;
            if (++count == DISP_FREQ) {
                printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
                       (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
                count = 0;
                delta_min = INT64_MAX;
                delta_max = 0;
                delta_cum = 0;
            }
        }
        last_clock = ti;
    }
#endif
    /* Dyntick alarms always expired by construction; otherwise check
       whether any clock's head timer has actually reached its deadline. */
    if (alarm_has_dynticks(t) ||
        (!use_icount &&
            qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
                               qemu_get_clock(vm_clock))) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_REALTIME],
                           qemu_get_clock(rt_clock)) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_HOST],
                           qemu_get_clock(host_clock))) {

        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}
|
1135 |
|
|
1136 |
static int64_t qemu_next_deadline(void) |
|
1137 |
{ |
|
1138 |
/* To avoid problems with overflow limit this to 2^32. */ |
|
1139 |
int64_t delta = INT32_MAX; |
|
1140 |
|
|
1141 |
if (active_timers[QEMU_CLOCK_VIRTUAL]) { |
|
1142 |
delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time - |
|
1143 |
qemu_get_clock(vm_clock); |
|
1144 |
} |
|
1145 |
if (active_timers[QEMU_CLOCK_HOST]) { |
|
1146 |
int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time - |
|
1147 |
qemu_get_clock(host_clock); |
|
1148 |
if (hdelta < delta) |
|
1149 |
delta = hdelta; |
|
1150 |
} |
|
1151 |
|
|
1152 |
if (delta < 0) |
|
1153 |
delta = 0; |
|
1154 |
|
|
1155 |
return delta; |
|
1156 |
} |
|
1157 |
|
|
1158 |
#if defined(__linux__) |
|
1159 |
/* Microseconds until the dyntick host timer should fire next,
   considering vm/host deadlines (ns -> us, rounded up) and rt_clock
   deadlines (ms -> us).  Never less than MIN_TIMER_REARM_US. */
static uint64_t qemu_next_deadline_dyntick(void)
{
    int64_t delta;
    int64_t rtdelta;

    if (use_icount)
        /* With icount the CPU loop recalculates deadlines itself. */
        delta = INT32_MAX;
    else
        delta = (qemu_next_deadline() + 999) / 1000;

    if (active_timers[QEMU_CLOCK_REALTIME]) {
        rtdelta = (active_timers[QEMU_CLOCK_REALTIME]->expire_time -
                 qemu_get_clock(rt_clock))*1000;
        if (rtdelta < delta)
            delta = rtdelta;
    }

    if (delta < MIN_TIMER_REARM_US)
        delta = MIN_TIMER_REARM_US;

    return delta;
}
|
1181 |
#endif |
|
1182 |
|
|
1183 |
#ifndef _WIN32 |
|
1184 |
|
|
1185 |
/* Sets a specific flag */ |
|
1186 |
/* OR `flag` into the file status flags of `fd`.
   Returns 0 on success, -errno on failure. */
static int fcntl_setfl(int fd, int flag)
{
    int cur = fcntl(fd, F_GETFL);

    if (cur == -1) {
        return -errno;
    }
    if (fcntl(fd, F_SETFL, cur | flag) == -1) {
        return -errno;
    }
    return 0;
}
|
1199 |
|
|
1200 |
#if defined(__linux__) |
|
1201 |
|
|
1202 |
#define RTC_FREQ 1024 |
|
1203 |
|
|
1204 |
/* Route SIGIO from `fd` to host_alarm_handler: install the handler,
   switch the fd to async mode and make this process its owner. */
static void enable_sigio_timer(int fd)
{
    struct sigaction act;

    /* timer signal */
    /* Block all other signals while the handler runs. */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGIO, &act, NULL);
    fcntl_setfl(fd, O_ASYNC);
    fcntl(fd, F_SETOWN, getpid());
}
|
1217 |
|
|
1218 |
/* Use /dev/hpet as the alarm source: program a 1024 Hz periodic
   interrupt delivered via SIGIO.  Returns 0 on success, -1 on any
   failure (caller falls back to the next backend). */
static int hpet_start_timer(struct qemu_alarm_timer *t)
{
    struct hpet_info info;
    int r, fd;

    fd = qemu_open("/dev/hpet", O_RDONLY);
    if (fd < 0)
        return -1;

    /* Set frequency */
    r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ);
    if (r < 0) {
        fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy type:\n"
                "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n");
        goto fail;
    }

    /* Check capabilities */
    r = ioctl(fd, HPET_INFO, &info);
    if (r < 0)
        goto fail;

    /* Enable periodic mode */
    /* Only treat EPI failure as fatal when the device advertises
       capability flags. */
    r = ioctl(fd, HPET_EPI, 0);
    if (info.hi_flags && (r < 0))
        goto fail;

    /* Enable interrupt */
    r = ioctl(fd, HPET_IE_ON, 0);
    if (r < 0)
        goto fail;

    enable_sigio_timer(fd);
    /* Stash the fd in priv for hpet_stop_timer(). */
    t->priv = (void *)(long)fd;

    return 0;
fail:
    close(fd);
    return -1;
}
|
1259 |
|
|
1260 |
static void hpet_stop_timer(struct qemu_alarm_timer *t) |
|
1261 |
{ |
|
1262 |
int fd = (long)t->priv; |
|
1263 |
|
|
1264 |
close(fd); |
|
1265 |
} |
|
1266 |
|
|
1267 |
/* Use /dev/rtc as the alarm source: a 1024 Hz periodic interrupt
   delivered via SIGIO.  Returns 0 on success, -1 on failure. */
static int rtc_start_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd;
    unsigned long current_rtc_freq = 0;

    TFR(rtc_fd = qemu_open("/dev/rtc", O_RDONLY));
    if (rtc_fd < 0)
        return -1;
    /* Only reprogram the frequency if it is not already 1024 Hz. */
    ioctl(rtc_fd, RTC_IRQP_READ, &current_rtc_freq);
    if (current_rtc_freq != RTC_FREQ &&
        ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) {
        fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n"
                "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n");
        goto fail;
    }
    if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) {
        /* NOTE: the `fail` label deliberately sits inside this if-body;
           the goto above jumps here too (legal C, shared cleanup). */
    fail:
        close(rtc_fd);
        return -1;
    }

    enable_sigio_timer(rtc_fd);

    /* Stash the fd in priv for rtc_stop_timer(). */
    t->priv = (void *)(long)rtc_fd;

    return 0;
}
|
1295 |
|
|
1296 |
static void rtc_stop_timer(struct qemu_alarm_timer *t) |
|
1297 |
{ |
|
1298 |
int rtc_fd = (long)t->priv; |
|
1299 |
|
|
1300 |
close(rtc_fd); |
|
1301 |
} |
|
1302 |
|
|
1303 |
/* Create a POSIX one-shot timer delivering SIGALRM; the actual
   deadline is programmed later by dynticks_rearm_timer().
   Returns 0 on success, -1 if timer_create() is unavailable. */
static int dynticks_start_timer(struct qemu_alarm_timer *t)
{
    struct sigevent ev;
    timer_t host_timer;
    struct sigaction act;

    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    /*
     * Initialize ev struct to 0 to avoid valgrind complaining
     * about uninitialized data in timer_create call
     */
    memset(&ev, 0, sizeof(ev));
    ev.sigev_value.sival_int = 0;
    ev.sigev_notify = SIGEV_SIGNAL;
    ev.sigev_signo = SIGALRM;

    if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
        perror("timer_create");

        /* disable dynticks */
        fprintf(stderr, "Dynamic Ticks disabled\n");

        return -1;
    }

    /* Stash the timer id in priv for rearm/stop. */
    t->priv = (void *)(long)host_timer;

    return 0;
}
|
1337 |
|
|
1338 |
static void dynticks_stop_timer(struct qemu_alarm_timer *t) |
|
1339 |
{ |
|
1340 |
timer_t host_timer = (timer_t)(long)t->priv; |
|
1341 |
|
|
1342 |
timer_delete(host_timer); |
|
1343 |
} |
|
1344 |
|
|
1345 |
/* Program the one-shot POSIX timer for the next QEMU deadline.
   Skips reprogramming when a sooner expiry is already armed. */
static void dynticks_rearm_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;
    struct itimerspec timeout;
    int64_t nearest_delta_us = INT64_MAX;
    int64_t current_us;

    assert(alarm_has_dynticks(t));
    /* Nothing pending on any clock: leave the timer disarmed. */
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    nearest_delta_us = qemu_next_deadline_dyntick();

    /* check whether a timer is already running */
    if (timer_gettime(host_timer, &timeout)) {
        perror("gettime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
    current_us = timeout.it_value.tv_sec * 1000000 + timeout.it_value.tv_nsec/1000;
    /* An armed timer that fires no later than our deadline is fine. */
    if (current_us && current_us <= nearest_delta_us)
        return;

    timeout.it_interval.tv_sec = 0;
    timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
    timeout.it_value.tv_sec =  nearest_delta_us / 1000000;
    timeout.it_value.tv_nsec = (nearest_delta_us % 1000000) * 1000;
    if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
        perror("settime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
}
|
1380 |
|
|
1381 |
#endif /* defined(__linux__) */ |
|
1382 |
|
|
1383 |
/* Fallback Unix backend: a periodic setitimer() SIGALRM at ~1 kHz.
   Returns 0 on success, -1 if setitimer() fails. */
static int unix_start_timer(struct qemu_alarm_timer *t)
{
    struct sigaction act;
    struct itimerval itv;
    int err;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    itv.it_interval.tv_sec = 0;
    /* for i386 kernel 2.6 to get 1 ms */
    itv.it_interval.tv_usec = 999;
    /* First expiry after 10 ms. */
    itv.it_value.tv_sec = 0;
    itv.it_value.tv_usec = 10 * 1000;

    err = setitimer(ITIMER_REAL, &itv, NULL);
    if (err)
        return -1;

    return 0;
}
|
1408 |
|
|
1409 |
static void unix_stop_timer(struct qemu_alarm_timer *t) |
|
1410 |
{ |
|
1411 |
struct itimerval itv; |
|
1412 |
|
|
1413 |
memset(&itv, 0, sizeof(itv)); |
|
1414 |
setitimer(ITIMER_REAL, &itv, NULL); |
|
1415 |
} |
|
1416 |
|
|
1417 |
#endif /* !defined(_WIN32) */ |
|
1418 |
|
|
1419 |
|
|
1420 |
#ifdef _WIN32 |
|
1421 |
|
|
1422 |
static int win32_start_timer(struct qemu_alarm_timer *t) |
|
1423 |
{ |
|
1424 |
TIMECAPS tc; |
|
1425 |
struct qemu_alarm_win32 *data = t->priv; |
|
1426 |
UINT flags; |
|
1427 |
|
|
1428 |
memset(&tc, 0, sizeof(tc)); |
|
1429 |
timeGetDevCaps(&tc, sizeof(tc)); |
|
1430 |
|
|
1431 |
data->period = tc.wPeriodMin; |
|
1432 |
timeBeginPeriod(data->period); |
|
1433 |
|
|
1434 |
flags = TIME_CALLBACK_FUNCTION; |
|
1435 |
if (alarm_has_dynticks(t)) |
|
1436 |
flags |= TIME_ONESHOT; |
|
1437 |
else |
|
1438 |
flags |= TIME_PERIODIC; |
|
1439 |
|
|
1440 |
data->timerId = timeSetEvent(1, // interval (ms) |
|
1441 |
data->period, // resolution |
|
1442 |
host_alarm_handler, // function |
|
1443 |
(DWORD)t, // parameter |
|
1444 |
flags); |
|
1445 |
|
|
1446 |
if (!data->timerId) { |
|
1447 |
fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n", |
|
1448 |
GetLastError()); |
|
1449 |
timeEndPeriod(data->period); |
|
1450 |
return -1; |
|
1451 |
} |
|
1452 |
|
|
1453 |
return 0; |
|
1454 |
} |
|
1455 |
|
|
1456 |
static void win32_stop_timer(struct qemu_alarm_timer *t) |
|
1457 |
{ |
|
1458 |
struct qemu_alarm_win32 *data = t->priv; |
|
1459 |
|
|
1460 |
timeKillEvent(data->timerId); |
|
1461 |
timeEndPeriod(data->period); |
|
1462 |
} |
|
1463 |
|
|
1464 |
static void win32_rearm_timer(struct qemu_alarm_timer *t) |
|
1465 |
{ |
|
1466 |
struct qemu_alarm_win32 *data = t->priv; |
|
1467 |
|
|
1468 |
assert(alarm_has_dynticks(t)); |
|
1469 |
if (!active_timers[QEMU_CLOCK_REALTIME] && |
|
1470 |
!active_timers[QEMU_CLOCK_VIRTUAL] && |
|
1471 |
!active_timers[QEMU_CLOCK_HOST]) |
|
1472 |
return; |
|
1473 |
|
|
1474 |
timeKillEvent(data->timerId); |
|
1475 |
|
|
1476 |
data->timerId = timeSetEvent(1, |
|
1477 |
data->period, |
|
1478 |
host_alarm_handler, |
|
1479 |
(DWORD)t, |
|
1480 |
TIME_ONESHOT | TIME_CALLBACK_FUNCTION); |
|
1481 |
|
|
1482 |
if (!data->timerId) { |
|
1483 |
fprintf(stderr, "Failed to re-arm win32 alarm timer %ld\n", |
|
1484 |
GetLastError()); |
|
1485 |
|
|
1486 |
timeEndPeriod(data->period); |
|
1487 |
exit(1); |
|
1488 |
} |
|
1489 |
} |
|
1490 |
|
|
1491 |
#endif /* _WIN32 */ |
|
1492 |
|
|
1493 |
/* VM state-change hook: when the VM transitions to running, re-arm
 * the alarm timer so pending deadlines are serviced again. */
static void alarm_timer_on_change_state_rearm(void *opaque, int running, int reason)
{
    struct qemu_alarm_timer *t = opaque;

    if (running) {
        qemu_rearm_alarm_timer(t);
    }
}
|
1498 |
|
|
1499 |
static int init_timer_alarm(void) |
|
1500 |
{ |
|
1501 |
struct qemu_alarm_timer *t = NULL; |
|
1502 |
int i, err = -1; |
|
1503 |
|
|
1504 |
for (i = 0; alarm_timers[i].name; i++) { |
|
1505 |
t = &alarm_timers[i]; |
|
1506 |
|
|
1507 |
err = t->start(t); |
|
1508 |
if (!err) |
|
1509 |
break; |
|
1510 |
} |
|
1511 |
|
|
1512 |
if (err) { |
|
1513 |
err = -ENOENT; |
|
1514 |
goto fail; |
|
1515 |
} |
|
1516 |
|
|
1517 |
/* first event is at time 0 */ |
|
1518 |
t->pending = 1; |
|
1519 |
alarm_timer = t; |
|
1520 |
qemu_add_vm_change_state_handler(alarm_timer_on_change_state_rearm, t); |
|
1521 |
|
|
1522 |
return 0; |
|
1523 |
|
|
1524 |
fail: |
|
1525 |
return err; |
|
1526 |
} |
|
1527 |
|
|
1528 |
static void quit_timers(void) |
|
1529 |
{ |
|
1530 |
struct qemu_alarm_timer *t = alarm_timer; |
|
1531 |
alarm_timer = NULL; |
|
1532 |
t->stop(t); |
|
1533 |
} |
|
1534 |
|
|
1535 | 409 |
/***********************************************************/ |
1536 | 410 |
/* host time/date access */ |
1537 | 411 |
void qemu_get_timedate(struct tm *tm, int offset) |
... | ... | |
4063 | 2937 |
return tcg_has_work(); |
4064 | 2938 |
} |
4065 | 2939 |
|
4066 |
static int qemu_calculate_timeout(void) |
|
4067 |
{ |
|
4068 |
#ifndef CONFIG_IOTHREAD |
|
4069 |
int timeout; |
|
4070 |
|
|
4071 |
if (!vm_running) |
|
4072 |
timeout = 5000; |
|
4073 |
else { |
|
4074 |
/* XXX: use timeout computed from timers */ |
|
4075 |
int64_t add; |
|
4076 |
int64_t delta; |
|
4077 |
/* Advance virtual time to the next event. */ |
|
4078 |
delta = qemu_icount_delta(); |
|
4079 |
if (delta > 0) { |
|
4080 |
/* If virtual time is ahead of real time then just |
|
4081 |
wait for IO. */ |
|
4082 |
timeout = (delta + 999999) / 1000000; |
|
4083 |
} else { |
|
4084 |
/* Wait for either IO to occur or the next |
|
4085 |
timer event. */ |
|
4086 |
add = qemu_next_deadline(); |
|
4087 |
/* We advance the timer before checking for IO. |
|
4088 |
Limit the amount we advance so that early IO |
|
4089 |
activity won't get the guest too far ahead. */ |
|
4090 |
if (add > 10000000) |
|
4091 |
add = 10000000; |
|
4092 |
delta += add; |
|
4093 |
qemu_icount += qemu_icount_round (add); |
|
4094 |
timeout = delta / 1000000; |
|
4095 |
if (timeout < 0) |
|
4096 |
timeout = 0; |
|
4097 |
} |
|
4098 |
} |
|
4099 |
|
|
4100 |
return timeout; |
|
4101 |
#else /* CONFIG_IOTHREAD */ |
|
4102 |
return 1000; |
|
4103 |
#endif |
|
4104 |
} |
|
4105 |
|
|
4106 | 2940 |
static int vm_can_run(void) |
4107 | 2941 |
{ |
4108 | 2942 |
if (powerdown_requested) |
Also available in: Unified diff