root / qemu-timer.h @ 8135aeed

#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"

/* timers */

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for stuff which does not
   change the virtual machine state, as it is run even if the virtual
   machine is stopped. The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during the emulation. It is stopped
   when the virtual machine is stopped. Virtual timers use a high
   precision clock, usually cpu cycles (use ticks_per_sec). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources. It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP). The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

int64_t qemu_get_clock(QEMUClock *clock);
int64_t qemu_get_clock_ns(QEMUClock *clock);
void qemu_clock_enable(QEMUClock *clock, int enabled);

QEMUTimer *qemu_new_timer(QEMUClock *clock, QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
int qemu_timer_pending(QEMUTimer *ts);
int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
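
/*
 * Example usage (an illustrative sketch, not part of the original header;
 * hyp_timer, hyp_timer_cb and hyp_opaque are hypothetical names).  A device
 * model typically creates a timer on vm_clock and re-arms it from its own
 * callback, with expiry times given in the clock's units, i.e.
 * get_ticks_per_sec() ticks per second (see below):
 *
 *     static QEMUTimer *hyp_timer;
 *
 *     static void hyp_timer_cb(void *opaque)
 *     {
 *         // update device state, then re-arm 10 ms of virtual time from now
 *         qemu_mod_timer(hyp_timer,
 *                        qemu_get_clock(vm_clock) + get_ticks_per_sec() / 100);
 *     }
 *
 *     // at device init:
 *     hyp_timer = qemu_new_timer(vm_clock, hyp_timer_cb, hyp_opaque);
 *     qemu_mod_timer(hyp_timer, qemu_get_clock(vm_clock));
 */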

void qemu_run_all_timers(void);
int qemu_alarm_pending(void);
int64_t qemu_next_deadline(void);
void configure_alarms(char const *opt);
void configure_icount(const char *option);
int qemu_calculate_timeout(void);
void init_clocks(void);
int init_timer_alarm(void);
void quit_timers(void);

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}
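/* Worked example (not in the original header): a 10 ms interval expressed in
   vm_clock units is get_ticks_per_sec() / 100 = 10,000,000 ticks, i.e.
   nanoseconds at this resolution. */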


void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);

/* ptimer.c */
typedef struct ptimer_state ptimer_state;
typedef void (*ptimer_cb)(void *opaque);

ptimer_state *ptimer_init(QEMUBH *bh);
void ptimer_set_period(ptimer_state *s, int64_t period);
void ptimer_set_freq(ptimer_state *s, uint32_t freq);
void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload);
uint64_t ptimer_get_count(ptimer_state *s);
void ptimer_set_count(ptimer_state *s, uint64_t count);
void ptimer_run(ptimer_state *s, int oneshot);
void ptimer_stop(ptimer_state *s);
void qemu_put_ptimer(QEMUFile *f, ptimer_state *s);
void qemu_get_ptimer(QEMUFile *f, ptimer_state *s);
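
/*
 * Example usage (an illustrative sketch, not part of the original header;
 * hyp_tick_bh and hyp_state are hypothetical).  A periodic down-counter
 * running at 1 MHz whose bottom half fires every 1000 counts:
 *
 *     QEMUBH *bh = qemu_bh_new(hyp_tick_bh, hyp_state);
 *     ptimer_state *pt = ptimer_init(bh);
 *     ptimer_set_freq(pt, 1000000);    // counter frequency in Hz
 *     ptimer_set_limit(pt, 1000, 1);   // reload value; nonzero reload loads it now
 *     ptimer_run(pt, 0);               // 0 = periodic, nonzero = one-shot
 */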

/* icount */
int64_t qemu_icount_round(int64_t count);
extern int64_t qemu_icount;
extern int use_icount;
extern int icount_time_shift;
extern int64_t qemu_icount_bias;
int64_t cpu_get_icount(void);
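
/*
 * Sketch of the relation (implemented outside this header): when -icount is
 * active (use_icount != 0), virtual time is derived from the number of
 * executed instructions rather than from the host clock, roughly
 *
 *     cpu_get_icount() ~= qemu_icount_bias + (instructions executed << icount_time_shift)
 *
 * expressed in nanoseconds.
 */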

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb    %0\n\t"
                          "cmpwi   %0,0\n\t"
                          "beq-    $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr   %1,269\n\t"  /* mftbu */
                          "mfspr   %L0,268\n\t" /* mftb */
                          "mfspr   %0,269\n\t"  /* mftbu */
                          "cmpw    %0,%1\n\t"
                          "bne     $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low,high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t        rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        }       i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2,
 * but since the Linux kernel emulates it, it is
 * fine to use it.
 */
#define MIPS_RDHWR(rd, value) {                         \
        __asm__ __volatile__ (".set   push\n\t"         \
                              ".set mips32r2\n\t"       \
                              "rdhwr  %0, "rd"\n\t"     \
                              ".set   pop"              \
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef NEED_CPU_H
/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUState *env)
{
    if (!use_icount)
        return 1;

    /* If not executing code then assume we are ok.  */
    if (!env->current_tb)
        return 1;

    return env->can_do_io != 0;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif

#endif