Revision 29e922b6 qemu-timer.h
b/qemu-timer.h | ||
---|---|---|
1 | 1 |
#ifndef QEMU_TIMER_H |
2 | 2 |
#define QEMU_TIMER_H |
3 | 3 |
|
4 |
#include "qemu-common.h" |
|
5 |
|
|
4 | 6 |
/* timers */ |
5 | 7 |
|
6 | 8 |
typedef struct QEMUClock QEMUClock; |
... | ... | |
/* Save / restore the state of a ptimer to / from a migration stream.  */
void qemu_put_ptimer(QEMUFile *f, ptimer_state *s);
void qemu_get_ptimer(QEMUFile *f, ptimer_state *s);

/* icount */
/* NOTE(review): rounding semantics live in the definition (cpus/vl code);
   presumably rounds a deadline to a whole instruction count — confirm
   against the defining translation unit.  */
int64_t qemu_icount_round(int64_t count);
/* Globals backing instruction-count based virtual time.  Defined in the
   main-loop code; declared here so the inline helpers below can use them.  */
extern int64_t qemu_icount;
extern int use_icount;          /* non-zero => icount mode (see can_do_io) */
extern int icount_time_shift;
extern int64_t qemu_icount_bias;
int64_t cpu_get_icount(void);
|
81 |
|
|
82 |
/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

/* Read the PowerPC timebase as a 64-bit tick count.  */
static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
       (the cmpwi/beq- loop retries while the read returns zero).  */
    __asm__ __volatile__ ("mftb %0\n\t"
                          "cmpwi %0,0\n\t"
                          "beq- $-8"
                          : "=r" (retval));
#else
    /* 32-bit PPC: the 64-bit timebase must be read as two halves; re-read
       the upper half (mftbu) afterwards and branch back if it changed
       mid-read, per:
       http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mftbu %1\n\t"
                          "mftb %L0\n\t"
                          "mftbu %0\n\t"
                          "cmpw %0,%1\n\t"
                          "bne $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}
|
110 |
|
|
111 |
#elif defined(__i386__)

/* RDTSC leaves the 64-bit timestamp counter in EDX:EAX; on 32-bit x86
   GCC's "=A" constraint maps that register pair directly onto a 64-bit
   C value.  */
static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}
|
119 |
|
|
120 |
#elif defined(__x86_64__)

/* On x86-64 the "=A" constraint does not cover the RDX:RAX pair, so read
   the two RDTSC halves into separate outputs and combine them by hand.  */
static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low,high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}
|
132 |
|
|
133 |
#elif defined(__hppa__)

/* PA-RISC: read the interval timer, control register %cr16.
   NOTE(review): the value is held in a plain 'int' here, so only the low
   32 bits are returned and the counter wraps far sooner than the 64-bit
   variants above — confirm callers only need relative deltas.  */
static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}
|
141 |
|
|
142 |
#elif defined(__ia64)

/* IA-64: ar.itc is the interval time counter; the "memory" clobber keeps
   the read ordered with respect to surrounding memory accesses.  */
static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}
|
150 |
|
|
151 |
#elif defined(__s390__)

/* s390: STCK stores the TOD clock to memory, so the address of 'val' is
   passed in an address register and the result read back from memory.
   STCK sets the condition code, hence the "cc" clobber.  */
static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}
|
159 |
|
|
160 |
#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

/* SPARC v8plus/v9: read the %tick register.  */
static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    /* 32-bit ABI on a 64-bit-register CPU: read %tick into one register
       (%1 keeps the low word), shift the top 32 bits into %0, and let the
       union reassemble the value.  NOTE(review): the struct layout (high
       word first) relies on big-endian host byte order — true for SPARC.  */
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        }  i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}
|
181 |
|
|
182 |
#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2
 * but as linux kernel emulate it, it's fine
 * to use it.
 *
 */
/* Read hardware register 'rd' (a string like "$2") into 'value' via the
   RDHWR instruction; the .set push/pop pair temporarily enables the
   mips32r2 ISA so binutils accepts the opcode.  */
#define MIPS_RDHWR(rd, value) { \
    __asm__ __volatile__ (".set push\n\t" \
                          ".set mips32r2\n\t" \
                          "rdhwr %0, "rd"\n\t" \
                          ".set pop" \
                          : "=r" (value)); \
}

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated.
       $2 is the cycle counter; $3 presumably its resolution (cycles per
       count) — cached in a static since it does not change.  */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    /* NOTE(review): the multiply is performed in 32 bits and may wrap
       before the widening cast; looks like callers only need a
       monotonic-ish value — confirm before relying on magnitudes.  */
    return (int64_t)(count * cyc_per_count);
}
|
211 |
|
|
212 |
#else |
|
213 |
/* No cheap hardware cycle counter is available on this host.  Hand out
   successive integers instead: meaningless as real time, and totally
   wrong for profiling, but at least monotonically increasing.
   NOTE(review): the static counter is not thread-safe — unchanged from
   the original behavior.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t fake_counter = 0;
    int64_t now = fake_counter;
    fake_counter += 1;
    return now;
}
|
221 |
#endif |
|
222 |
|
|
223 |
#ifdef NEED_CPU_H
/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUState *env)
{
    /* IO is unrestricted when icount mode is off; when no TB is being
       executed, assume we are ok; otherwise defer to the per-CPU flag.  */
    return !use_icount || !env->current_tb || (env->can_do_io != 0);
}
#endif
|
238 |
|
|
72 | 239 |
#endif |
Also available in: Unified diff