qemu-timer.h @ 4a998740

#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include <time.h>
#include <sys/time.h>

#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
#endif

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for things which do not
   change the virtual machine state, as it runs even if the virtual
   machine is stopped. The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during the emulation. It is stopped
   when the virtual machine is stopped. Virtual timers use a high
   precision clock, usually cpu cycles (use get_ticks_per_sec()). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources. It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP). The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

int64_t qemu_get_clock(QEMUClock *clock);
int64_t qemu_get_clock_ns(QEMUClock *clock);
void qemu_clock_enable(QEMUClock *clock, int enabled);
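
/* Example (illustrative sketch): reading the clocks and gating one of them.
 * qemu_get_clock_ns() returns nanoseconds; qemu_get_clock() counts in the
 * clock's own resolution, which for rt_clock is milliseconds (1000 Hz, per
 * the comment above).
 *
 *     int64_t now_ns = qemu_get_clock_ns(vm_clock);  // guest virtual time
 *     int64_t now_ms = qemu_get_clock(rt_clock);     // host real time, ms
 *
 *     qemu_clock_enable(vm_clock, 0);  // timers on this clock stop firing
 *     qemu_clock_enable(vm_clock, 1);  // and resume here
 */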

QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
int qemu_timer_pending(QEMUTimer *ts);
int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
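
/* Example (illustrative sketch): a device model arming a timer on vm_clock
 * that re-arms itself from its callback. MyDevice, my_timer_cb and the
 * timer/period_ns fields are hypothetical names, not part of this API.
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         MyDevice *s = opaque;
 *         // ... update device state, raise an interrupt, etc. ...
 *         qemu_mod_timer(s->timer,
 *                        qemu_get_clock_ns(vm_clock) + s->period_ns);
 *     }
 *
 *     s->timer = qemu_new_timer_ns(vm_clock, my_timer_cb, s);
 *     qemu_mod_timer(s->timer, qemu_get_clock_ns(vm_clock) + s->period_ns);
 *     // ... later, on device reset or teardown:
 *     qemu_del_timer(s->timer);   // cancel a pending expiry
 *     qemu_free_timer(s->timer);  // release the timer
 */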

void qemu_run_all_timers(void);
int qemu_alarm_pending(void);
int64_t qemu_next_deadline(void);
void configure_alarms(char const *opt);
void configure_icount(const char *option);
int qemu_calculate_timeout(void);
void init_clocks(void);
int init_timer_alarm(void);
void quit_timers(void);

static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}
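
/* Example (illustrative sketch): the helpers above let callers pick a scale
 * once and stay in it; expire times passed to qemu_mod_timer() are read in
 * the timer's scale. freq_hz, cb and opaque are hypothetical names.
 *
 *     int64_t period_ns = get_ticks_per_sec() / freq_hz;      // ns per device tick
 *     QEMUTimer *t = qemu_new_timer_ms(rt_clock, cb, opaque); // millisecond timer
 *     qemu_mod_timer(t, qemu_get_clock_ms(rt_clock) + 10);    // fire in ~10 ms
 */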

/* real time host monotonic timer */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by the simpletrace backend and tracepoints would cause
   infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);

/* ptimer.c */
typedef struct ptimer_state ptimer_state;
typedef void (*ptimer_cb)(void *opaque);

ptimer_state *ptimer_init(QEMUBH *bh);
void ptimer_set_period(ptimer_state *s, int64_t period);
void ptimer_set_freq(ptimer_state *s, uint32_t freq);
void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload);
uint64_t ptimer_get_count(ptimer_state *s);
void ptimer_set_count(ptimer_state *s, uint64_t count);
void ptimer_run(ptimer_state *s, int oneshot);
void ptimer_stop(ptimer_state *s);
void qemu_put_ptimer(QEMUFile *f, ptimer_state *s);
void qemu_get_ptimer(QEMUFile *f, ptimer_state *s);
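
/* Example (illustrative sketch): typical ptimer setup in a device model.
 * `s`, its fields and my_tick are hypothetical names; qemu_bh_new() is the
 * bottom-half constructor declared elsewhere in the tree.
 *
 *     QEMUBH *bh = qemu_bh_new(my_tick, s);
 *     s->ptimer = ptimer_init(bh);
 *     ptimer_set_freq(s->ptimer, 1000000);        // count at 1 MHz
 *     ptimer_set_limit(s->ptimer, s->reload, 1);  // limit, and reload the count
 *     ptimer_run(s->ptimer, 0);                   // 0 = periodic, non-zero = one-shot
 *     // ...
 *     ptimer_stop(s->ptimer);
 */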

/* icount */
int64_t qemu_icount_round(int64_t count);
extern int64_t qemu_icount;
extern int use_icount;
extern int icount_time_shift;
extern int64_t qemu_icount_bias;
int64_t cpu_get_icount(void);
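
/* Informal note (an approximation, not a spec): with -icount enabled
 * (use_icount != 0), vm_clock time is derived from the number of executed
 * guest instructions rather than from a host clock source, roughly
 *
 *     virtual_ns = qemu_icount_bias + (instructions << icount_time_shift)
 *
 * i.e. each instruction accounts for 2^icount_time_shift ns of virtual time;
 * see the icount code for the exact accounting.
 */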

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb    %0\n\t"
                          "cmpwi   %0,0\n\t"
                          "beq-    $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr   %1,269\n\t"  /* mftbu */
                          "mfspr   %L0,268\n\t" /* mftb */
                          "mfspr   %0,269\n\t"  /* mftbu */
                          "cmpw    %0,%1\n\t"
                          "bne     $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t        rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        }       i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2,
 * but since the Linux kernel emulates it, it is
 * fine to use it.
 */
#define MIPS_RDHWR(rd, value) {                         \
        __asm__ __volatile__ (".set   push\n\t"         \
                              ".set mips32r2\n\t"       \
                              "rdhwr  %0, "rd"\n\t"     \
                              ".set   pop"              \
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef NEED_CPU_H
/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUState *env)
{
    if (!use_icount)
        return 1;

    /* If not executing code then assume we are ok.  */
    if (!env->current_tb)
        return 1;

    return env->can_do_io != 0;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
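
/* Example (illustrative sketch): these counters are meant to be accumulated
 * around profiled sections, e.g.
 *
 *     int64_t ti = profile_getclock();
 *     // ... run the code being profiled ...
 *     dev_time += profile_getclock() - ti;
 */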
#endif

#endif