qemu-timer.h @ 371c6489

#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include "main-loop.h"
#include "notify.h"
#include <time.h>
#include <sys/time.h>

#ifdef _WIN32
#include <windows.h>
#endif

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1
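
/* Each SCALE_* constant is the number of nanoseconds per unit of that scale,
   so a timer created with SCALE_MS interprets expire times in milliseconds,
   SCALE_US in microseconds and SCALE_NS in nanoseconds.  A worked conversion
   (illustration only, not code from this header):

       5 (ms) * SCALE_MS = 5000000 ns
*/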

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for stuff which does not
   change the virtual machine state, as it is run even if the virtual
   machine is stopped. The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during the emulation. It is stopped
   when the virtual machine is stopped. Virtual timers use a high
   precision clock, usually cpu cycles (use get_ticks_per_sec()). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources. It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP). The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

int64_t qemu_get_clock_ns(QEMUClock *clock);
int64_t qemu_clock_has_timers(QEMUClock *clock);
int64_t qemu_clock_expired(QEMUClock *clock);
int64_t qemu_clock_deadline(QEMUClock *clock);
void qemu_clock_enable(QEMUClock *clock, int enabled);
void qemu_clock_warp(QEMUClock *clock);
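
/* Minimal usage sketch: all three clocks are read through the same accessor;
   only their progression differs, as described above.  The variable names are
   hypothetical.

       int64_t vm_ns   = qemu_get_clock_ns(vm_clock);
       int64_t rt_ns   = qemu_get_clock_ns(rt_clock);
       int64_t host_ns = qemu_get_clock_ns(host_clock);
*/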

void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
                                          Notifier *notifier);

QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
int qemu_timer_pending(QEMUTimer *ts);
int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts);
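
/* Usage sketch for the timer API above.  MyDevState, my_dev_tick and the
   1 ms period are hypothetical; expire times are absolute values of the
   timer's clock, in the timer's scale (here nanoseconds).

       typedef struct MyDevState {
           QEMUTimer *timer;
       } MyDevState;

       static void my_dev_tick(void *opaque)
       {
           MyDevState *s = opaque;
           // ... react to the expiration, then re-arm 1 ms of guest time later
           qemu_mod_timer_ns(s->timer, qemu_get_clock_ns(vm_clock) + 1000000);
       }

       // setup: allocate the timer and arm its first expiration
       s->timer = qemu_new_timer(vm_clock, SCALE_NS, my_dev_tick, s);
       qemu_mod_timer_ns(s->timer, qemu_get_clock_ns(vm_clock) + 1000000);

       // teardown: cancel any pending expiration, then release the timer
       qemu_del_timer(s->timer);
       qemu_free_timer(s->timer);
*/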

void qemu_run_all_timers(void);
int qemu_alarm_pending(void);
void configure_alarms(char const *opt);
int qemu_calculate_timeout(void);
void init_clocks(void);
int init_timer_alarm(void);
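
/* Rough sketch of how a main loop is expected to drive these declarations
   (simplified; the actual wait/poll on qemu_calculate_timeout() and all
   error handling are omitted):

       init_clocks();
       if (init_timer_alarm() < 0) {
           // could not set up a host alarm timer
       }

       for (;;) {
           // wait for I/O or an alarm for at most qemu_calculate_timeout(),
           // then dispatch every timer that has expired
           qemu_run_all_timers();
       }
*/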

int64_t cpu_get_ticks(void);
void cpu_enable_ticks(void);
void cpu_disable_ticks(void);

static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}
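
/* Sketch combining the millisecond helpers: a timer created with
   qemu_new_timer_ms() takes its expire times in milliseconds, so arming it
   10 ms of guest time from now (reusing the hypothetical my_dev_tick/s from
   the sketch above) looks like:

       QEMUTimer *t = qemu_new_timer_ms(vm_clock, my_dev_tick, s);
       qemu_mod_timer(t, qemu_get_clock_ms(vm_clock) + 10);
*/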

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}

/* host wall-clock time in nanoseconds */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by the simpletrace backend and tracepoints would cause
   an infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
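
/* Sketch: qemu_put_timer()/qemu_get_timer() write and read a timer's pending
   expiration through a QEMUFile, typically from a device's snapshot handlers.
   The handler names and the legacy savevm-style signatures below are
   assumptions, not part of this header.

       static void my_dev_save(QEMUFile *f, void *opaque)
       {
           MyDevState *s = opaque;
           qemu_put_timer(f, s->timer);
       }

       static int my_dev_load(QEMUFile *f, void *opaque, int version_id)
       {
           MyDevState *s = opaque;
           qemu_get_timer(f, s->timer);
           return 0;
       }
*/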

/* icount */
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb    %0\n\t"
                          "cmpwi   %0,0\n\t"
                          "beq-    $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr   %1,269\n\t"  /* mftbu */
                          "mfspr   %L0,268\n\t" /* mftb */
                          "mfspr   %0,269\n\t"  /* mftbu */
                          "cmpw    %0,%1\n\t"
                          "bne     $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low,high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t        rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        }       i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2,
 * but since the Linux kernel emulates it, it's fine
 * to use it.
 */
#define MIPS_RDHWR(rd, value) {                         \
        __asm__ __volatile__ (".set   push\n\t"         \
                              ".set mips32r2\n\t"       \
                              "rdhwr  %0, "rd"\n\t"     \
                              ".set   pop"              \
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif
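
/* Sketch of the intended profiling pattern: sample profile_getclock() around
   a region and accumulate the difference into one of the counters above (the
   choice of dev_time here is illustrative).

       int64_t ti = profile_getclock();
       // ... code being profiled ...
       dev_time += profile_getclock() - ti;
*/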

#endif