include/qemu/timer.h @ 1de7afc9

#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"

#ifdef __FreeBSD__
#include <sys/param.h>
#endif

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1
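
/*
 * The SCALE_* values are nanoseconds per unit: SCALE_MS and SCALE_US are
 * the number of ns in a millisecond and a microsecond, SCALE_NS is the
 * identity.  A minimal sketch (not part of this header) of expressing
 * durations in the nanosecond values used throughout this API:
 *
 *     int64_t delay_10ms_ns  = 10 * SCALE_MS;
 *     int64_t delay_250us_ns = 250 * SCALE_US;
 */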

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for stuff which does not
   change the virtual machine state, as it is run even if the virtual
   machine is stopped. The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during the emulation. It is stopped
   when the virtual machine is stopped. Virtual timers use a high
   precision clock, usually cpu cycles (use ticks_per_sec). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources. It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP). The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

int64_t qemu_get_clock_ns(QEMUClock *clock);
int64_t qemu_clock_has_timers(QEMUClock *clock);
int64_t qemu_clock_expired(QEMUClock *clock);
int64_t qemu_clock_deadline(QEMUClock *clock);
void qemu_clock_enable(QEMUClock *clock, bool enabled);
void qemu_clock_warp(QEMUClock *clock);
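
/*
 * Illustrative sketch (not part of this header): sampling the clocks in
 * nanoseconds.  vm_clock only advances while the guest is running,
 * whereas rt_clock keeps running even when the VM is stopped.
 *
 *     int64_t guest_now_ns = qemu_get_clock_ns(vm_clock);
 *     int64_t host_now_ns  = qemu_get_clock_ns(rt_clock);
 */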

void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
                                          Notifier *notifier);
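
/*
 * Hypothetical usage sketch (callback and variable names invented for
 * illustration): a device that wants to react when a clock is reset,
 * e.g. the host clock reflecting a system time change, can register a
 * Notifier from "qemu/notify.h".
 *
 *     static void my_clock_reset_cb(Notifier *n, void *data)
 *     {
 *         // re-synchronize device state with the new clock value
 *     }
 *
 *     static Notifier my_clock_reset_notifier = {
 *         .notify = my_clock_reset_cb,
 *     };
 *
 *     qemu_register_clock_reset_notifier(host_clock, &my_clock_reset_notifier);
 */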

QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
bool qemu_timer_pending(QEMUTimer *ts);
bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts);
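
/*
 * Illustrative usage sketch (MyDeviceState, s and the callback are
 * hypothetical, not part of this header): create a one-shot timer on
 * vm_clock with nanosecond scale, arm it 10 ms into the future, and
 * tear it down again.  The expire time passed to qemu_mod_timer() is in
 * the timer's scale units; with SCALE_NS that is plain nanoseconds, the
 * same unit qemu_get_clock_ns() returns.
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         MyDeviceState *s = opaque;
 *         // handle expiry; re-arm from here for a periodic timer
 *     }
 *
 *     QEMUTimer *t = qemu_new_timer(vm_clock, SCALE_NS, my_timer_cb, s);
 *     qemu_mod_timer(t, qemu_get_clock_ns(vm_clock) + 10 * SCALE_MS);
 *
 *     if (qemu_timer_pending(t)) {
 *         qemu_del_timer(t);      // cancel without freeing
 *     }
 *     qemu_free_timer(t);         // release the timer
 */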

void qemu_run_timers(QEMUClock *clock);
void qemu_run_all_timers(void);
void configure_alarms(char const *opt);
void init_clocks(void);
int init_timer_alarm(void);

int64_t cpu_get_ticks(void);
void cpu_enable_ticks(void);
void cpu_disable_ticks(void);

static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}
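
/*
 * get_ticks_per_sec() gives the number of clock ticks in one second;
 * since the clocks count nanoseconds it is simply 10^9.  A minimal
 * sketch (cb and opaque are placeholders) combining it with the
 * convenience wrappers above:
 *
 *     QEMUTimer *t = qemu_new_timer_ns(vm_clock, cb, opaque);
 *     qemu_mod_timer(t, qemu_get_clock_ns(vm_clock) + get_ticks_per_sec());
 *     // fires roughly one second of guest time later
 */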

/* real time host monotonic timer */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by the simpletrace backend and tracepoints would cause
   an infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
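
/*
 * qemu_put_timer()/qemu_get_timer() write and read a timer's expiry
 * state through a QEMUFile so a pending timer can survive savevm and
 * migration.  Hypothetical sketch (the device state, field names and
 * handlers are invented for illustration):
 *
 *     static void my_device_save(QEMUFile *f, void *opaque)
 *     {
 *         MyDeviceState *s = opaque;
 *         qemu_put_timer(f, s->timer);
 *     }
 *
 *     static int my_device_load(QEMUFile *f, void *opaque, int version_id)
 *     {
 *         MyDeviceState *s = opaque;
 *         qemu_get_timer(f, s->timer);
 *         return 0;
 *     }
 */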

/* icount */
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb    %0\n\t"
                          "cmpwi   %0,0\n\t"
                          "beq-    $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr   %1,269\n\t"  /* mftbu */
                          "mfspr   %L0,268\n\t" /* mftb */
                          "mfspr   %0,269\n\t"  /* mftbu */
                          "cmpw    %0,%1\n\t"
                          "bne     $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t        rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    /* We need an %o or %g register for this.  For recent enough gcc
       there is an "h" constraint for that.  Don't bother with that.  */
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        }       i32;
    } rval;
    asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2,
 * but as the Linux kernel emulates it, it's fine
 * to use it.
 */
#define MIPS_RDHWR(rd, value) {                         \
        __asm__ __volatile__ (".set   push\n\t"         \
                              ".set mips32r2\n\t"       \
                              "rdhwr  %0, "rd"\n\t"     \
                              ".set   pop"              \
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
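
/*
 * Illustrative sketch (not part of this header) of how these profiling
 * counters are typically driven: sample profile_getclock() around a
 * region of interest and accumulate the delta.
 *
 *     int64_t ti = profile_getclock();
 *     // ... run the code being profiled ...
 *     dev_time += profile_getclock() - ti;
 */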
#endif

#endif