#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include <time.h>
#include <sys/time.h>

#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
#endif

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

    
/* The real time clock should be used only for stuff which does not
   change the virtual machine state, as it is run even if the virtual
   machine is stopped. The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during the emulation. It is stopped
   when the virtual machine is stopped. Virtual timers use a high
   precision clock, usually cpu cycles (use ticks_per_sec). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources. It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP). The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

int64_t qemu_get_clock_ns(QEMUClock *clock);
void qemu_clock_enable(QEMUClock *clock, int enabled);
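/*
 * Illustrative example (added commentary, not part of the original header):
 * reading the clocks declared above.  Every value returned by
 * qemu_get_clock_ns() is in nanoseconds.
 *
 *     int64_t vm_ns   = qemu_get_clock_ns(vm_clock);   // stops while the VM is stopped
 *     int64_t rt_ns   = qemu_get_clock_ns(rt_clock);   // keeps running even when the VM is stopped
 *     int64_t host_ns = qemu_get_clock_ns(host_clock); // follows host time changes (e.g. NTP)
 */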

    
QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
int qemu_timer_pending(QEMUTimer *ts);
int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);

void qemu_run_all_timers(void);
int qemu_alarm_pending(void);
int64_t qemu_next_deadline(void);
void configure_alarms(char const *opt);
void configure_icount(const char *option);
int qemu_calculate_timeout(void);
void init_clocks(void);
int init_timer_alarm(void);
void quit_timers(void);

static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}
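/*
 * Illustrative sketch (added commentary, not part of the original header):
 * a common pattern for the API above is a device that re-arms a millisecond
 * timer from its own callback.  "MyState", "s" and "my_cb" are hypothetical
 * names used only for this example.
 *
 *     typedef struct MyState { QEMUTimer *timer; } MyState;
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         // ... periodic work ...
 *         qemu_mod_timer(s->timer, qemu_get_clock_ms(vm_clock) + 10);  // re-arm in 10 ms
 *     }
 *
 *     // setup (e.g. in the device init function):
 *     s->timer = qemu_new_timer_ms(vm_clock, my_cb, s);
 *     qemu_mod_timer(s->timer, qemu_get_clock_ms(vm_clock) + 10);
 *
 *     // teardown:
 *     qemu_del_timer(s->timer);
 *     qemu_free_timer(s->timer);
 *
 * Because the timer was created with SCALE_MS (via qemu_new_timer_ms), the
 * expire times passed to qemu_mod_timer() are in milliseconds of vm_clock time.
 */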

    
/* real time host monotonic timer */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by the simpletrace backend and tracepoints would cause
   an infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif
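/*
 * Note (added commentary, not part of the original header): both branches of
 * get_clock() return nanoseconds.  On Win32 the performance counter ticks at
 * clock_freq counts per second, so the conversion is
 *
 *     ns = counter * 1000000000 / clock_freq
 *
 * which muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq) computes
 * without overflowing an intermediate 64-bit product.
 */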

    
void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);

/* ptimer.c */
typedef struct ptimer_state ptimer_state;
typedef void (*ptimer_cb)(void *opaque);

ptimer_state *ptimer_init(QEMUBH *bh);
void ptimer_set_period(ptimer_state *s, int64_t period);
void ptimer_set_freq(ptimer_state *s, uint32_t freq);
void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload);
uint64_t ptimer_get_count(ptimer_state *s);
void ptimer_set_count(ptimer_state *s, uint64_t count);
void ptimer_run(ptimer_state *s, int oneshot);
void ptimer_stop(ptimer_state *s);
void qemu_put_ptimer(QEMUFile *f, ptimer_state *s);
void qemu_get_ptimer(QEMUFile *f, ptimer_state *s);
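/*
 * Illustrative sketch (added commentary, not part of the original header):
 * a device model driving a periodic down-counter with the ptimer API above.
 * "my_tick" and "s" are hypothetical names; the bottom half comes from
 * qemu_bh_new().
 *
 *     QEMUBH *bh = qemu_bh_new(my_tick, s);   // my_tick(s) runs when the counter hits zero
 *     s->ptimer = ptimer_init(bh);
 *     ptimer_set_freq(s->ptimer, 1000000);    // count down at 1 MHz
 *     ptimer_set_limit(s->ptimer, 1000, 1);   // reload value 1000 -> callback at 1 kHz
 *     ptimer_run(s->ptimer, 0);               // 0 = periodic, non-zero = one-shot
 */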

    
/* icount */
int64_t qemu_icount_round(int64_t count);
extern int64_t qemu_icount;
extern int use_icount;
extern int icount_time_shift;
extern int64_t qemu_icount_bias;
int64_t cpu_get_icount(void);
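/*
 * Note (added commentary, not part of the original header): when -icount is in
 * use, vm_clock time is derived from the number of executed guest instructions
 * rather than from the host clock.  Roughly, each instruction accounts for
 * 2^icount_time_shift nanoseconds, so cpu_get_icount() evaluates to about
 *
 *     qemu_icount_bias + (instructions_executed << icount_time_shift)
 *
 * with qemu_icount_bias allowing the clock to be warped, e.g. while the
 * virtual CPU is idle.
 */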

    
/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb    %0\n\t"
                          "cmpwi   %0,0\n\t"
                          "beq-    $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr   %1,269\n\t"  /* mftbu */
                          "mfspr   %L0,268\n\t" /* mftb */
                          "mfspr   %0,269\n\t"  /* mftbu */
                          "cmpw    %0,%1\n\t"
                          "bne     $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t        rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        }       i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

    
#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2,
 * but since the Linux kernel emulates it, it is
 * fine to use it.
 */
#define MIPS_RDHWR(rd, value) {                         \
        __asm__ __volatile__ (".set   push\n\t"         \
                              ".set mips32r2\n\t"       \
                              "rdhwr  %0, "rd"\n\t"     \
                              ".set   pop"              \
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

    
#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

    
#ifdef NEED_CPU_H
/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUState *env)
{
    if (!use_icount)
        return 1;

    /* If not executing code then assume we are ok.  */
    if (!env->current_tb)
        return 1;

    return env->can_do_io != 0;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif

#endif