root / cpus.c @ 3f24a58f

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUArchState *next_cpu;

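/* Return true if @env's thread can go to sleep: the CPU is stopped (or the
   VM is not running), or it is halted with no interrupt or queued work
   pending.  */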
static bool cpu_thread_is_idle(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_async_interrupts_enabled()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUArchState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that.  */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

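/* Fold the real time that elapsed while the CPUs were idle into the icount
   bias, so that the virtual clock keeps advancing while the guest sleeps.
   Runs from icount_warp_timer or directly from qemu_clock_warp().  */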
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(vm_clock)) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}

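/* Advance the virtual clock to @dest on behalf of a qtest command, running
   any timers that expire along the way.  */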
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_get_clock_ns(vm_clock);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline(vm_clock);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_run_timers(vm_clock);
        clock = qemu_get_clock_ns(vm_clock);
    }
    qemu_notify_event();
}

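/* Called when the vm_clock may need to advance while all CPUs are idle in
   icount mode: either account the elapsed real time immediately or arm
   icount_warp_timer so that virtual time keeps moving.  */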
void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUArchState *env;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(ENV_GET_CPU(cpu));
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(ENV_GET_CPU(cpu));
    }
}

bool cpu_is_stopped(CPUState *cpu)
{
    return !runstate_is_running() || cpu->stopped;
}

static void do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        bdrv_drain_all();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUArchState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUArchState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

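/* Run @func(@data) on @cpu's thread, waiting for it to complete when called
   from a different thread.  The global mutex must be held.  */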
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUArchState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

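/* Housekeeping shared by all CPU thread loops: acknowledge a pending stop
   request, run queued work items and clear the kick flag.  */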
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUArchState *env;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(ENV_GET_CPU(env));
    }
}

static void qemu_kvm_wait_io_event(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

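/* Per-vCPU thread body for KVM: create the vcpu, then loop between
   kvm_cpu_exec() and waiting for I/O events.  */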
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    cpu_single_env = env;
    while (1) {
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

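/* Thread body for TCG: a single thread runs all vCPUs round-robin
   (see tcg_exec_all), sleeping whenever every CPU is idle.  */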
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    CPUArchState *env;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (ENV_GET_CPU(first_cpu)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(ENV_GET_CPU(env));
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

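/* Force the thread running @cpu out of the guest: send SIG_IPI on POSIX
   hosts, or suspend the thread and raise the exit request on Windows.  */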
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%d\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%d\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);
    CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);

    if (!cpu_single_cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_cpu);
        cpu_single_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return cpu_single_env && qemu_cpu_is_self(ENV_GET_CPU(cpu_single_env));
}

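/* Acquire the global ("iothread") mutex.  Under TCG, kick the CPU thread
   if the lock is contended so that it gives up the mutex promptly.  */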
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(ENV_GET_CPU(first_cpu));
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUArchState *penv = first_cpu;

    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        if (!pcpu->stopped) {
            return 0;
        }
        penv = penv->next_cpu;
    }

    return 1;
}

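/* Ask every vCPU to stop and wait until all of them have paused.  */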
961 296af7c9 Blue Swirl
void pause_all_vcpus(void)
962 296af7c9 Blue Swirl
{
963 9349b4f9 Andreas Färber
    CPUArchState *penv = first_cpu;
964 296af7c9 Blue Swirl
965 a5c57d64 Paolo Bonzini
    qemu_clock_enable(vm_clock, false);
966 296af7c9 Blue Swirl
    while (penv) {
967 4fdeee7c Andreas Färber
        CPUState *pcpu = ENV_GET_CPU(penv);
968 4fdeee7c Andreas Färber
        pcpu->stop = true;
969 c08d7424 Andreas Färber
        qemu_cpu_kick(pcpu);
970 5207a5e0 Jan Kiszka
        penv = penv->next_cpu;
971 296af7c9 Blue Swirl
    }
972 296af7c9 Blue Swirl
973 aa723c23 Juan Quintela
    if (qemu_in_vcpu_thread()) {
974 d798e974 Jan Kiszka
        cpu_stop_current();
975 d798e974 Jan Kiszka
        if (!kvm_enabled()) {
976 d798e974 Jan Kiszka
            while (penv) {
977 4fdeee7c Andreas Färber
                CPUState *pcpu = ENV_GET_CPU(penv);
978 4fdeee7c Andreas Färber
                pcpu->stop = 0;
979 f324e766 Andreas Färber
                pcpu->stopped = true;
980 d798e974 Jan Kiszka
                penv = penv->next_cpu;
981 d798e974 Jan Kiszka
            }
982 d798e974 Jan Kiszka
            return;
983 d798e974 Jan Kiszka
        }
984 d798e974 Jan Kiszka
    }
985 d798e974 Jan Kiszka
986 296af7c9 Blue Swirl
    while (!all_vcpus_paused()) {
987 be7d6c57 Paolo Bonzini
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
988 296af7c9 Blue Swirl
        penv = first_cpu;
989 296af7c9 Blue Swirl
        while (penv) {
990 c08d7424 Andreas Färber
            qemu_cpu_kick(ENV_GET_CPU(penv));
991 5207a5e0 Jan Kiszka
            penv = penv->next_cpu;
992 296af7c9 Blue Swirl
        }
993 296af7c9 Blue Swirl
    }
994 296af7c9 Blue Swirl
}
995 296af7c9 Blue Swirl
996 296af7c9 Blue Swirl
void resume_all_vcpus(void)
997 296af7c9 Blue Swirl
{
998 9349b4f9 Andreas Färber
    CPUArchState *penv = first_cpu;
999 296af7c9 Blue Swirl
1000 47113ab6 Wen Congyang
    qemu_clock_enable(vm_clock, true);
1001 296af7c9 Blue Swirl
    while (penv) {
1002 4fdeee7c Andreas Färber
        CPUState *pcpu = ENV_GET_CPU(penv);
1003 4fdeee7c Andreas Färber
        pcpu->stop = false;
1004 f324e766 Andreas Färber
        pcpu->stopped = false;
1005 c08d7424 Andreas Färber
        qemu_cpu_kick(pcpu);
1006 5207a5e0 Jan Kiszka
        penv = penv->next_cpu;
1007 296af7c9 Blue Swirl
    }
1008 296af7c9 Blue Swirl
}
1009 296af7c9 Blue Swirl
1010 e5ab30a2 Andreas Färber
static void qemu_tcg_init_vcpu(CPUState *cpu)
1011 296af7c9 Blue Swirl
{
1012 296af7c9 Blue Swirl
    /* share a single thread for all cpus with TCG */
1013 296af7c9 Blue Swirl
    if (!tcg_cpu_thread) {
1014 814e612e Andreas Färber
        cpu->thread = g_malloc0(sizeof(QemuThread));
1015 f5c121b8 Andreas Färber
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1016 f5c121b8 Andreas Färber
        qemu_cond_init(cpu->halt_cond);
1017 f5c121b8 Andreas Färber
        tcg_halt_cond = cpu->halt_cond;
1018 c3586ba7 Andreas Färber
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
1019 1ecf47bf Paolo Bonzini
                           QEMU_THREAD_JOINABLE);
1020 1ecf47bf Paolo Bonzini
#ifdef _WIN32
1021 814e612e Andreas Färber
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
1022 1ecf47bf Paolo Bonzini
#endif
1023 61a46217 Andreas Färber
        while (!cpu->created) {
1024 18a85728 Paolo Bonzini
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1025 0ab07c62 Jan Kiszka
        }
1026 814e612e Andreas Färber
        tcg_cpu_thread = cpu->thread;
1027 296af7c9 Blue Swirl
    } else {
1028 814e612e Andreas Färber
        cpu->thread = tcg_cpu_thread;
1029 f5c121b8 Andreas Färber
        cpu->halt_cond = tcg_halt_cond;
1030 296af7c9 Blue Swirl
    }
1031 296af7c9 Blue Swirl
}
1032 296af7c9 Blue Swirl
1033 9349b4f9 Andreas Färber
static void qemu_kvm_start_vcpu(CPUArchState *env)
1034 296af7c9 Blue Swirl
{
1035 814e612e Andreas Färber
    CPUState *cpu = ENV_GET_CPU(env);
1036 814e612e Andreas Färber
1037 814e612e Andreas Färber
    cpu->thread = g_malloc0(sizeof(QemuThread));
1038 f5c121b8 Andreas Färber
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1039 f5c121b8 Andreas Färber
    qemu_cond_init(cpu->halt_cond);
1040 814e612e Andreas Färber
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
1041 1ecf47bf Paolo Bonzini
                       QEMU_THREAD_JOINABLE);
1042 61a46217 Andreas Färber
    while (!cpu->created) {
1043 18a85728 Paolo Bonzini
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1044 0ab07c62 Jan Kiszka
    }
1045 296af7c9 Blue Swirl
}
1046 296af7c9 Blue Swirl
1047 c7f0f3b1 Anthony Liguori
static void qemu_dummy_start_vcpu(CPUArchState *env)
1048 c7f0f3b1 Anthony Liguori
{
1049 814e612e Andreas Färber
    CPUState *cpu = ENV_GET_CPU(env);
1050 814e612e Andreas Färber
1051 814e612e Andreas Färber
    cpu->thread = g_malloc0(sizeof(QemuThread));
1052 f5c121b8 Andreas Färber
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1053 f5c121b8 Andreas Färber
    qemu_cond_init(cpu->halt_cond);
1054 814e612e Andreas Färber
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, env,
1055 c7f0f3b1 Anthony Liguori
                       QEMU_THREAD_JOINABLE);
1056 61a46217 Andreas Färber
    while (!cpu->created) {
1057 c7f0f3b1 Anthony Liguori
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1058 c7f0f3b1 Anthony Liguori
    }
1059 c7f0f3b1 Anthony Liguori
}
1060 c7f0f3b1 Anthony Liguori
1061 296af7c9 Blue Swirl
void qemu_init_vcpu(void *_env)
1062 296af7c9 Blue Swirl
{
1063 9349b4f9 Andreas Färber
    CPUArchState *env = _env;
1064 f324e766 Andreas Färber
    CPUState *cpu = ENV_GET_CPU(env);
1065 296af7c9 Blue Swirl
1066 ce3960eb Andreas Färber
    cpu->nr_cores = smp_cores;
1067 ce3960eb Andreas Färber
    cpu->nr_threads = smp_threads;
1068 f324e766 Andreas Färber
    cpu->stopped = true;
1069 0ab07c62 Jan Kiszka
    if (kvm_enabled()) {
1070 7e97cd88 Jan Kiszka
        qemu_kvm_start_vcpu(env);
1071 c7f0f3b1 Anthony Liguori
    } else if (tcg_enabled()) {
1072 e5ab30a2 Andreas Färber
        qemu_tcg_init_vcpu(cpu);
1073 c7f0f3b1 Anthony Liguori
    } else {
1074 c7f0f3b1 Anthony Liguori
        qemu_dummy_start_vcpu(env);
1075 0ab07c62 Jan Kiszka
    }
1076 296af7c9 Blue Swirl
}
1077 296af7c9 Blue Swirl
1078 b4a3d965 Jan Kiszka
void cpu_stop_current(void)
1079 296af7c9 Blue Swirl
{
1080 b4a3d965 Jan Kiszka
    if (cpu_single_env) {
1081 4fdeee7c Andreas Färber
        CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
1082 4fdeee7c Andreas Färber
        cpu_single_cpu->stop = false;
1083 f324e766 Andreas Färber
        cpu_single_cpu->stopped = true;
1084 b4a3d965 Jan Kiszka
        cpu_exit(cpu_single_env);
1085 67bb172f Paolo Bonzini
        qemu_cond_signal(&qemu_pause_cond);
1086 b4a3d965 Jan Kiszka
    }
1087 296af7c9 Blue Swirl
}
1088 296af7c9 Blue Swirl
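/* Stop the VM.  From a vCPU thread the stop is only requested and the
   current CPU halted, so the actual state transition happens outside the
   vCPU thread; otherwise do_vm_stop() performs it immediately. */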
1089 1dfb4dd9 Luiz Capitulino
void vm_stop(RunState state)
1090 296af7c9 Blue Swirl
{
1091 aa723c23 Juan Quintela
    if (qemu_in_vcpu_thread()) {
1092 1dfb4dd9 Luiz Capitulino
        qemu_system_vmstop_request(state);
1093 296af7c9 Blue Swirl
        /*
1094 296af7c9 Blue Swirl
         * FIXME: should not return to device code when
1095 296af7c9 Blue Swirl
         * vm_stop() has been requested.
1096 296af7c9 Blue Swirl
         */
1097 b4a3d965 Jan Kiszka
        cpu_stop_current();
1098 296af7c9 Blue Swirl
        return;
1099 296af7c9 Blue Swirl
    }
1100 1dfb4dd9 Luiz Capitulino
    do_vm_stop(state);
1101 296af7c9 Blue Swirl
}
1102 296af7c9 Blue Swirl
1103 8a9236f1 Luiz Capitulino
/* Do a state transition even if the VM is already stopped;
1104 8a9236f1 Luiz Capitulino
   the current state is forgotten forever. */
1105 8a9236f1 Luiz Capitulino
void vm_stop_force_state(RunState state)
1106 8a9236f1 Luiz Capitulino
{
1107 8a9236f1 Luiz Capitulino
    if (runstate_is_running()) {
1108 8a9236f1 Luiz Capitulino
        vm_stop(state);
1109 8a9236f1 Luiz Capitulino
    } else {
1110 8a9236f1 Luiz Capitulino
        runstate_set(state);
1111 8a9236f1 Luiz Capitulino
    }
1112 8a9236f1 Luiz Capitulino
}
1113 8a9236f1 Luiz Capitulino
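/* Execute one CPU under TCG.  With -icount, refill the instruction budget
   from the vm_clock deadline before calling cpu_exec() and fold any
   unexecuted instructions back into qemu_icount afterwards. */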
1114 9349b4f9 Andreas Färber
static int tcg_cpu_exec(CPUArchState *env)
1115 296af7c9 Blue Swirl
{
1116 296af7c9 Blue Swirl
    int ret;
1117 296af7c9 Blue Swirl
#ifdef CONFIG_PROFILER
1118 296af7c9 Blue Swirl
    int64_t ti;
1119 296af7c9 Blue Swirl
#endif
1120 296af7c9 Blue Swirl
1121 296af7c9 Blue Swirl
#ifdef CONFIG_PROFILER
1122 296af7c9 Blue Swirl
    ti = profile_getclock();
1123 296af7c9 Blue Swirl
#endif
1124 296af7c9 Blue Swirl
    if (use_icount) {
1125 296af7c9 Blue Swirl
        int64_t count;
1126 296af7c9 Blue Swirl
        int decr;
1127 296af7c9 Blue Swirl
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
1128 296af7c9 Blue Swirl
        env->icount_decr.u16.low = 0;
1129 296af7c9 Blue Swirl
        env->icount_extra = 0;
1130 946fb27c Paolo Bonzini
        count = qemu_icount_round(qemu_clock_deadline(vm_clock));
1131 296af7c9 Blue Swirl
        qemu_icount += count;
1132 296af7c9 Blue Swirl
        decr = (count > 0xffff) ? 0xffff : count;
1133 296af7c9 Blue Swirl
        count -= decr;
1134 296af7c9 Blue Swirl
        env->icount_decr.u16.low = decr;
1135 296af7c9 Blue Swirl
        env->icount_extra = count;
1136 296af7c9 Blue Swirl
    }
1137 296af7c9 Blue Swirl
    ret = cpu_exec(env);
1138 296af7c9 Blue Swirl
#ifdef CONFIG_PROFILER
1139 296af7c9 Blue Swirl
    qemu_time += profile_getclock() - ti;
1140 296af7c9 Blue Swirl
#endif
1141 296af7c9 Blue Swirl
    if (use_icount) {
1142 296af7c9 Blue Swirl
        /* Fold pending instructions back into the
1143 296af7c9 Blue Swirl
           instruction counter, and clear the interrupt flag.  */
1144 296af7c9 Blue Swirl
        qemu_icount -= (env->icount_decr.u16.low
1145 296af7c9 Blue Swirl
                        + env->icount_extra);
1146 296af7c9 Blue Swirl
        env->icount_decr.u32 = 0;
1147 296af7c9 Blue Swirl
        env->icount_extra = 0;
1148 296af7c9 Blue Swirl
    }
1149 296af7c9 Blue Swirl
    return ret;
1150 296af7c9 Blue Swirl
}
1151 296af7c9 Blue Swirl
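/* Round-robin scheduler for the single TCG thread: run each runnable CPU
   in turn, resuming where the previous pass stopped, until an exit is
   requested, a CPU is stopped, or a debug exception occurs. */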
1152 bdb7ca67 Jan Kiszka
static void tcg_exec_all(void)
1153 296af7c9 Blue Swirl
{
1154 9a36085b Jan Kiszka
    int r;
1155 9a36085b Jan Kiszka
1156 ab33fcda Paolo Bonzini
    /* Account partial waits to the vm_clock.  */
1157 ab33fcda Paolo Bonzini
    qemu_clock_warp(vm_clock);
1158 ab33fcda Paolo Bonzini
1159 0ab07c62 Jan Kiszka
    if (next_cpu == NULL) {
1160 296af7c9 Blue Swirl
        next_cpu = first_cpu;
1161 0ab07c62 Jan Kiszka
    }
1162 c629a4bc Jan Kiszka
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
1163 9349b4f9 Andreas Färber
        CPUArchState *env = next_cpu;
1164 4fdeee7c Andreas Färber
        CPUState *cpu = ENV_GET_CPU(env);
1165 296af7c9 Blue Swirl
1166 296af7c9 Blue Swirl
        qemu_clock_enable(vm_clock,
1167 345f4426 Jan Kiszka
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);
1168 296af7c9 Blue Swirl
1169 a1fcaa73 Andreas Färber
        if (cpu_can_run(cpu)) {
1170 bdb7ca67 Jan Kiszka
            r = tcg_cpu_exec(env);
1171 9a36085b Jan Kiszka
            if (r == EXCP_DEBUG) {
1172 1009d2ed Jan Kiszka
                cpu_handle_guest_debug(env);
1173 3c638d06 Jan Kiszka
                break;
1174 3c638d06 Jan Kiszka
            }
1175 f324e766 Andreas Färber
        } else if (cpu->stop || cpu->stopped) {
1176 296af7c9 Blue Swirl
            break;
1177 296af7c9 Blue Swirl
        }
1178 296af7c9 Blue Swirl
    }
1179 c629a4bc Jan Kiszka
    exit_request = 0;
1180 296af7c9 Blue Swirl
}
1181 296af7c9 Blue Swirl
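/* Assign each CPU to the NUMA node whose cpumask contains its index. */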
1182 296af7c9 Blue Swirl
void set_numa_modes(void)
1183 296af7c9 Blue Swirl
{
1184 9349b4f9 Andreas Färber
    CPUArchState *env;
1185 1b1ed8dc Andreas Färber
    CPUState *cpu;
1186 296af7c9 Blue Swirl
    int i;
1187 296af7c9 Blue Swirl
1188 296af7c9 Blue Swirl
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
1189 1b1ed8dc Andreas Färber
        cpu = ENV_GET_CPU(env);
1190 296af7c9 Blue Swirl
        for (i = 0; i < nb_numa_nodes; i++) {
1191 55e5c285 Andreas Färber
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
1192 1b1ed8dc Andreas Färber
                cpu->numa_node = i;
1193 296af7c9 Blue Swirl
            }
1194 296af7c9 Blue Swirl
        }
1195 296af7c9 Blue Swirl
    }
1196 296af7c9 Blue Swirl
}
1197 296af7c9 Blue Swirl
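/* Print the list of CPU models supported by the current target, when the
   target provides a cpu_list implementation. */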
1198 9a78eead Stefan Weil
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
1199 262353cb Blue Swirl
{
1200 262353cb Blue Swirl
    /* XXX: implement xxx_cpu_list for targets that still lack it */
1201 e916cbf8 Peter Maydell
#if defined(cpu_list)
1202 e916cbf8 Peter Maydell
    cpu_list(f, cpu_fprintf);
1203 262353cb Blue Swirl
#endif
1204 262353cb Blue Swirl
}
1205 de0b36b6 Luiz Capitulino
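/* QMP query-cpus: build one CpuInfoList entry per CPU with its index,
   halted state, host thread id and a target-specific program counter. */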
1206 de0b36b6 Luiz Capitulino
CpuInfoList *qmp_query_cpus(Error **errp)
1207 de0b36b6 Luiz Capitulino
{
1208 de0b36b6 Luiz Capitulino
    CpuInfoList *head = NULL, *cur_item = NULL;
1209 9349b4f9 Andreas Färber
    CPUArchState *env;
1210 de0b36b6 Luiz Capitulino
1211 9f09e18a Andreas Färber
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
1212 9f09e18a Andreas Färber
        CPUState *cpu = ENV_GET_CPU(env);
1213 de0b36b6 Luiz Capitulino
        CpuInfoList *info;
1214 de0b36b6 Luiz Capitulino
1215 de0b36b6 Luiz Capitulino
        cpu_synchronize_state(env);
1216 de0b36b6 Luiz Capitulino
1217 de0b36b6 Luiz Capitulino
        info = g_malloc0(sizeof(*info));
1218 de0b36b6 Luiz Capitulino
        info->value = g_malloc0(sizeof(*info->value));
1219 55e5c285 Andreas Färber
        info->value->CPU = cpu->cpu_index;
1220 de0b36b6 Luiz Capitulino
        info->value->current = (env == first_cpu);
1221 259186a7 Andreas Färber
        info->value->halted = cpu->halted;
1222 9f09e18a Andreas Färber
        info->value->thread_id = cpu->thread_id;
1223 de0b36b6 Luiz Capitulino
#if defined(TARGET_I386)
1224 de0b36b6 Luiz Capitulino
        info->value->has_pc = true;
1225 de0b36b6 Luiz Capitulino
        info->value->pc = env->eip + env->segs[R_CS].base;
1226 de0b36b6 Luiz Capitulino
#elif defined(TARGET_PPC)
1227 de0b36b6 Luiz Capitulino
        info->value->has_nip = true;
1228 de0b36b6 Luiz Capitulino
        info->value->nip = env->nip;
1229 de0b36b6 Luiz Capitulino
#elif defined(TARGET_SPARC)
1230 de0b36b6 Luiz Capitulino
        info->value->has_pc = true;
1231 de0b36b6 Luiz Capitulino
        info->value->pc = env->pc;
1232 de0b36b6 Luiz Capitulino
        info->value->has_npc = true;
1233 de0b36b6 Luiz Capitulino
        info->value->npc = env->npc;
1234 de0b36b6 Luiz Capitulino
#elif defined(TARGET_MIPS)
1235 de0b36b6 Luiz Capitulino
        info->value->has_PC = true;
1236 de0b36b6 Luiz Capitulino
        info->value->PC = env->active_tc.PC;
1237 de0b36b6 Luiz Capitulino
#endif
1238 de0b36b6 Luiz Capitulino
1239 de0b36b6 Luiz Capitulino
        /* XXX: waiting for QAPI to support GSList */
1240 de0b36b6 Luiz Capitulino
        if (!cur_item) {
1241 de0b36b6 Luiz Capitulino
            head = cur_item = info;
1242 de0b36b6 Luiz Capitulino
        } else {
1243 de0b36b6 Luiz Capitulino
            cur_item->next = info;
1244 de0b36b6 Luiz Capitulino
            cur_item = info;
1245 de0b36b6 Luiz Capitulino
        }
1246 de0b36b6 Luiz Capitulino
    }
1247 de0b36b6 Luiz Capitulino
1248 de0b36b6 Luiz Capitulino
    return head;
1249 de0b36b6 Luiz Capitulino
}
1250 0cfd6a9a Luiz Capitulino
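/* QMP memsave: write 'size' bytes of guest virtual memory starting at
   'addr', translated in the context of the given CPU (default CPU 0),
   to 'filename'. */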
1251 0cfd6a9a Luiz Capitulino
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1252 0cfd6a9a Luiz Capitulino
                 bool has_cpu, int64_t cpu_index, Error **errp)
1253 0cfd6a9a Luiz Capitulino
{
1254 0cfd6a9a Luiz Capitulino
    FILE *f;
1255 0cfd6a9a Luiz Capitulino
    uint32_t l;
1256 9349b4f9 Andreas Färber
    CPUArchState *env;
1257 55e5c285 Andreas Färber
    CPUState *cpu;
1258 0cfd6a9a Luiz Capitulino
    uint8_t buf[1024];
1259 0cfd6a9a Luiz Capitulino
1260 0cfd6a9a Luiz Capitulino
    if (!has_cpu) {
1261 0cfd6a9a Luiz Capitulino
        cpu_index = 0;
1262 0cfd6a9a Luiz Capitulino
    }
1263 0cfd6a9a Luiz Capitulino
1264 151d1322 Andreas Färber
    cpu = qemu_get_cpu(cpu_index);
1265 151d1322 Andreas Färber
    if (cpu == NULL) {
1266 0cfd6a9a Luiz Capitulino
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1267 0cfd6a9a Luiz Capitulino
                  "a CPU number");
1268 0cfd6a9a Luiz Capitulino
        return;
1269 0cfd6a9a Luiz Capitulino
    }
1270 151d1322 Andreas Färber
    env = cpu->env_ptr;
1271 0cfd6a9a Luiz Capitulino
1272 0cfd6a9a Luiz Capitulino
    f = fopen(filename, "wb");
1273 0cfd6a9a Luiz Capitulino
    if (!f) {
1274 0cfd6a9a Luiz Capitulino
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
1275 0cfd6a9a Luiz Capitulino
        return;
1276 0cfd6a9a Luiz Capitulino
    }
1277 0cfd6a9a Luiz Capitulino
1278 0cfd6a9a Luiz Capitulino
    while (size != 0) {
1279 0cfd6a9a Luiz Capitulino
        l = sizeof(buf);
1280 0cfd6a9a Luiz Capitulino
        if (l > size) {
1281 0cfd6a9a Luiz Capitulino
            l = size;
        }
1282 0cfd6a9a Luiz Capitulino
        cpu_memory_rw_debug(env, addr, buf, l, 0);
1283 0cfd6a9a Luiz Capitulino
        if (fwrite(buf, 1, l, f) != l) {
1284 0cfd6a9a Luiz Capitulino
            error_set(errp, QERR_IO_ERROR);
1285 0cfd6a9a Luiz Capitulino
            goto exit;
1286 0cfd6a9a Luiz Capitulino
        }
1287 0cfd6a9a Luiz Capitulino
        addr += l;
1288 0cfd6a9a Luiz Capitulino
        size -= l;
1289 0cfd6a9a Luiz Capitulino
    }
1290 0cfd6a9a Luiz Capitulino
1291 0cfd6a9a Luiz Capitulino
exit:
1292 0cfd6a9a Luiz Capitulino
    fclose(f);
1293 0cfd6a9a Luiz Capitulino
}
1294 6d3962bf Luiz Capitulino
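/* QMP pmemsave: write 'size' bytes of guest physical memory starting at
   'addr' to 'filename'. */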
1295 6d3962bf Luiz Capitulino
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1296 6d3962bf Luiz Capitulino
                  Error **errp)
1297 6d3962bf Luiz Capitulino
{
1298 6d3962bf Luiz Capitulino
    FILE *f;
1299 6d3962bf Luiz Capitulino
    uint32_t l;
1300 6d3962bf Luiz Capitulino
    uint8_t buf[1024];
1301 6d3962bf Luiz Capitulino
1302 6d3962bf Luiz Capitulino
    f = fopen(filename, "wb");
1303 6d3962bf Luiz Capitulino
    if (!f) {
1304 6d3962bf Luiz Capitulino
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
1305 6d3962bf Luiz Capitulino
        return;
1306 6d3962bf Luiz Capitulino
    }
1307 6d3962bf Luiz Capitulino
1308 6d3962bf Luiz Capitulino
    while (size != 0) {
1309 6d3962bf Luiz Capitulino
        l = sizeof(buf);
1310 6d3962bf Luiz Capitulino
        if (l > size) {
1311 6d3962bf Luiz Capitulino
            l = size;
        }
1312 6d3962bf Luiz Capitulino
        cpu_physical_memory_rw(addr, buf, l, 0);
1313 6d3962bf Luiz Capitulino
        if (fwrite(buf, 1, l, f) != l) {
1314 6d3962bf Luiz Capitulino
            error_set(errp, QERR_IO_ERROR);
1315 6d3962bf Luiz Capitulino
            goto exit;
1316 6d3962bf Luiz Capitulino
        }
1317 6d3962bf Luiz Capitulino
        addr += l;
1318 6d3962bf Luiz Capitulino
        size -= l;
1319 6d3962bf Luiz Capitulino
    }
1320 6d3962bf Luiz Capitulino
1321 6d3962bf Luiz Capitulino
exit:
1322 6d3962bf Luiz Capitulino
    fclose(f);
1323 6d3962bf Luiz Capitulino
}
1324 ab49ab5c Luiz Capitulino
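/* QMP inject-nmi: on x86, deliver an NMI to every CPU, through the local
   APIC when present or by raising CPU_INTERRUPT_NMI directly otherwise;
   other targets report the command as unsupported. */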
1325 ab49ab5c Luiz Capitulino
void qmp_inject_nmi(Error **errp)
1326 ab49ab5c Luiz Capitulino
{
1327 ab49ab5c Luiz Capitulino
#if defined(TARGET_I386)
1328 9349b4f9 Andreas Färber
    CPUArchState *env;
1329 ab49ab5c Luiz Capitulino
1330 ab49ab5c Luiz Capitulino
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
1331 02c09195 Jan Kiszka
        if (!env->apic_state) {
1332 c3affe56 Andreas Färber
            cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_NMI);
1333 02c09195 Jan Kiszka
        } else {
1334 02c09195 Jan Kiszka
            apic_deliver_nmi(env->apic_state);
1335 02c09195 Jan Kiszka
        }
1336 ab49ab5c Luiz Capitulino
    }
1337 ab49ab5c Luiz Capitulino
#else
1338 ab49ab5c Luiz Capitulino
    error_set(errp, QERR_UNSUPPORTED);
1339 ab49ab5c Luiz Capitulino
#endif
1340 ab49ab5c Luiz Capitulino
}