cpus.c @ bdf7ae5b

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that.  */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

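/* Adaptive icount: each emulated instruction accounts for 2^icount_time_shift
   nanoseconds of vm_clock time (see cpu_get_icount() above).  icount_adjust()
   compares the icount-based vm_clock against the host clock and nudges the
   shift so that emulated time roughly tracks real time.  */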
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(vm_clock)) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_get_clock_ns(vm_clock);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline(vm_clock);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_run_timers(vm_clock);
        clock = qemu_get_clock_ns(vm_clock);
    }
    qemu_notify_event();
}

void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

bool cpu_is_stopped(CPUState *cpu)
{
    return !runstate_is_running() || cpu->stopped;
}

static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

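/* Run func(data) on cpu's thread.  If called from that thread, run it
   immediately; otherwise queue the work item, kick the CPU and sleep on
   qemu_work_cond (dropping the global mutex) until the item has been
   processed.  async_run_on_cpu() below queues a heap-allocated item and
   returns without waiting.  */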
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void tcg_signal_cpu_creation(CPUState *cpu, void *data)
{
    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
}

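/* With TCG a single thread drives all vCPUs: tcg_exec_all() executes each
   CPU in turn, and qemu_tcg_wait_io_event() puts the thread to sleep while
   every vCPU is idle or while the I/O thread is acquiring the global mutex.  */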
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_for_each_cpu(tcg_signal_cpu_creation, NULL);
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu = first_cpu;

    while (cpu) {
        if (!cpu->stopped) {
            return 0;
        }
        cpu = cpu->next_cpu;
    }

    return 1;
}

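/* Called with the global mutex held: ask every vCPU to stop and wait on
   qemu_pause_cond until all of them have done so.  When invoked from a vCPU
   thread, cpu_stop_current() stops the calling CPU first; without KVM the
   CPUs are simply marked stopped and the function returns immediately.  */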
984 296af7c9 Blue Swirl
void pause_all_vcpus(void)
985 296af7c9 Blue Swirl
{
986 182735ef Andreas Färber
    CPUState *cpu = first_cpu;
987 296af7c9 Blue Swirl
988 a5c57d64 Paolo Bonzini
    qemu_clock_enable(vm_clock, false);
989 182735ef Andreas Färber
    while (cpu) {
990 182735ef Andreas Färber
        cpu->stop = true;
991 182735ef Andreas Färber
        qemu_cpu_kick(cpu);
992 182735ef Andreas Färber
        cpu = cpu->next_cpu;
993 296af7c9 Blue Swirl
    }
994 296af7c9 Blue Swirl
995 aa723c23 Juan Quintela
    if (qemu_in_vcpu_thread()) {
996 d798e974 Jan Kiszka
        cpu_stop_current();
997 d798e974 Jan Kiszka
        if (!kvm_enabled()) {
998 182735ef Andreas Färber
            cpu = first_cpu;
999 182735ef Andreas Färber
            while (cpu) {
1000 182735ef Andreas Färber
                cpu->stop = false;
1001 182735ef Andreas Färber
                cpu->stopped = true;
1002 182735ef Andreas Färber
                cpu = cpu->next_cpu;
1003 d798e974 Jan Kiszka
            }
1004 d798e974 Jan Kiszka
            return;
1005 d798e974 Jan Kiszka
        }
1006 d798e974 Jan Kiszka
    }
1007 d798e974 Jan Kiszka
1008 296af7c9 Blue Swirl
    while (!all_vcpus_paused()) {
1009 be7d6c57 Paolo Bonzini
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
1010 182735ef Andreas Färber
        cpu = first_cpu;
1011 182735ef Andreas Färber
        while (cpu) {
1012 182735ef Andreas Färber
            qemu_cpu_kick(cpu);
1013 182735ef Andreas Färber
            cpu = cpu->next_cpu;
1014 296af7c9 Blue Swirl
        }
1015 296af7c9 Blue Swirl
    }
1016 296af7c9 Blue Swirl
}
1017 296af7c9 Blue Swirl
1018 2993683b Igor Mammedov
void cpu_resume(CPUState *cpu)
1019 2993683b Igor Mammedov
{
1020 2993683b Igor Mammedov
    cpu->stop = false;
1021 2993683b Igor Mammedov
    cpu->stopped = false;
1022 2993683b Igor Mammedov
    qemu_cpu_kick(cpu);
1023 2993683b Igor Mammedov
}
1024 2993683b Igor Mammedov
1025 296af7c9 Blue Swirl
void resume_all_vcpus(void)
1026 296af7c9 Blue Swirl
{
1027 182735ef Andreas Färber
    CPUState *cpu = first_cpu;
1028 296af7c9 Blue Swirl
1029 47113ab6 Wen Congyang
    qemu_clock_enable(vm_clock, true);
1030 182735ef Andreas Färber
    while (cpu) {
1031 182735ef Andreas Färber
        cpu_resume(cpu);
1032 182735ef Andreas Färber
        cpu = cpu->next_cpu;
1033 296af7c9 Blue Swirl
    }
1034 296af7c9 Blue Swirl
}
1035 296af7c9 Blue Swirl
1036 e5ab30a2 Andreas Färber
static void qemu_tcg_init_vcpu(CPUState *cpu)
1037 296af7c9 Blue Swirl
{
1038 296af7c9 Blue Swirl
    /* share a single thread for all cpus with TCG */
1039 296af7c9 Blue Swirl
    if (!tcg_cpu_thread) {
1040 814e612e Andreas Färber
        cpu->thread = g_malloc0(sizeof(QemuThread));
1041 f5c121b8 Andreas Färber
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1042 f5c121b8 Andreas Färber
        qemu_cond_init(cpu->halt_cond);
1043 f5c121b8 Andreas Färber
        tcg_halt_cond = cpu->halt_cond;
1044 c3586ba7 Andreas Färber
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
1045 1ecf47bf Paolo Bonzini
                           QEMU_THREAD_JOINABLE);
1046 1ecf47bf Paolo Bonzini
#ifdef _WIN32
1047 814e612e Andreas Färber
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
1048 1ecf47bf Paolo Bonzini
#endif
1049 61a46217 Andreas Färber
        while (!cpu->created) {
1050 18a85728 Paolo Bonzini
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1051 0ab07c62 Jan Kiszka
        }
1052 814e612e Andreas Färber
        tcg_cpu_thread = cpu->thread;
1053 296af7c9 Blue Swirl
    } else {
1054 814e612e Andreas Färber
        cpu->thread = tcg_cpu_thread;
1055 f5c121b8 Andreas Färber
        cpu->halt_cond = tcg_halt_cond;
1056 296af7c9 Blue Swirl
    }
1057 296af7c9 Blue Swirl
}
1058 296af7c9 Blue Swirl
1059 48a106bd Andreas Färber
static void qemu_kvm_start_vcpu(CPUState *cpu)
1060 296af7c9 Blue Swirl
{
1061 814e612e Andreas Färber
    cpu->thread = g_malloc0(sizeof(QemuThread));
1062 f5c121b8 Andreas Färber
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1063 f5c121b8 Andreas Färber
    qemu_cond_init(cpu->halt_cond);
1064 48a106bd Andreas Färber
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
1065 1ecf47bf Paolo Bonzini
                       QEMU_THREAD_JOINABLE);
1066 61a46217 Andreas Färber
    while (!cpu->created) {
1067 18a85728 Paolo Bonzini
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1068 0ab07c62 Jan Kiszka
    }
1069 296af7c9 Blue Swirl
}
1070 296af7c9 Blue Swirl
1071 10a9021d Andreas Färber
static void qemu_dummy_start_vcpu(CPUState *cpu)
1072 c7f0f3b1 Anthony Liguori
{
1073 814e612e Andreas Färber
    cpu->thread = g_malloc0(sizeof(QemuThread));
1074 f5c121b8 Andreas Färber
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1075 f5c121b8 Andreas Färber
    qemu_cond_init(cpu->halt_cond);
1076 10a9021d Andreas Färber
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
1077 c7f0f3b1 Anthony Liguori
                       QEMU_THREAD_JOINABLE);
1078 61a46217 Andreas Färber
    while (!cpu->created) {
1079 c7f0f3b1 Anthony Liguori
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1080 c7f0f3b1 Anthony Liguori
    }
1081 c7f0f3b1 Anthony Liguori
}
1082 c7f0f3b1 Anthony Liguori
1083 c643bed9 Andreas Färber
void qemu_init_vcpu(CPUState *cpu)
1084 296af7c9 Blue Swirl
{
1085 ce3960eb Andreas Färber
    cpu->nr_cores = smp_cores;
1086 ce3960eb Andreas Färber
    cpu->nr_threads = smp_threads;
1087 f324e766 Andreas Färber
    cpu->stopped = true;
1088 0ab07c62 Jan Kiszka
    if (kvm_enabled()) {
1089 48a106bd Andreas Färber
        qemu_kvm_start_vcpu(cpu);
1090 c7f0f3b1 Anthony Liguori
    } else if (tcg_enabled()) {
1091 e5ab30a2 Andreas Färber
        qemu_tcg_init_vcpu(cpu);
1092 c7f0f3b1 Anthony Liguori
    } else {
1093 10a9021d Andreas Färber
        qemu_dummy_start_vcpu(cpu);
1094 0ab07c62 Jan Kiszka
    }
1095 296af7c9 Blue Swirl
}
1096 296af7c9 Blue Swirl
1097 b4a3d965 Jan Kiszka
void cpu_stop_current(void)
1098 296af7c9 Blue Swirl
{
1099 4917cf44 Andreas Färber
    if (current_cpu) {
1100 4917cf44 Andreas Färber
        current_cpu->stop = false;
1101 4917cf44 Andreas Färber
        current_cpu->stopped = true;
1102 4917cf44 Andreas Färber
        cpu_exit(current_cpu);
1103 67bb172f Paolo Bonzini
        qemu_cond_signal(&qemu_pause_cond);
1104 b4a3d965 Jan Kiszka
    }
1105 296af7c9 Blue Swirl
}
1106 296af7c9 Blue Swirl
1107 56983463 Kevin Wolf
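/* Stop the VM.  When called from a vCPU thread this only files a stop
 * request and returns; the transition is completed outside the vCPU
 * thread (see the FIXME below).  Otherwise the stop is performed
 * synchronously via do_vm_stop().  */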
int vm_stop(RunState state)
1108 296af7c9 Blue Swirl
{
1109 aa723c23 Juan Quintela
    if (qemu_in_vcpu_thread()) {
1110 1dfb4dd9 Luiz Capitulino
        qemu_system_vmstop_request(state);
1111 296af7c9 Blue Swirl
        /*
1112 296af7c9 Blue Swirl
         * FIXME: should not return to device code in case
1113 296af7c9 Blue Swirl
         * vm_stop() has been requested.
1114 296af7c9 Blue Swirl
         */
1115 b4a3d965 Jan Kiszka
        cpu_stop_current();
1116 56983463 Kevin Wolf
        return 0;
1117 296af7c9 Blue Swirl
    }
1118 56983463 Kevin Wolf
1119 56983463 Kevin Wolf
    return do_vm_stop(state);
1120 296af7c9 Blue Swirl
}
1121 296af7c9 Blue Swirl
1122 8a9236f1 Luiz Capitulino
/* Does a state transition even if the VM is already stopped;
1123 8a9236f1 Luiz Capitulino
   the current state is forgotten forever. */
1124 56983463 Kevin Wolf
int vm_stop_force_state(RunState state)
1125 8a9236f1 Luiz Capitulino
{
1126 8a9236f1 Luiz Capitulino
    if (runstate_is_running()) {
1127 56983463 Kevin Wolf
        return vm_stop(state);
1128 8a9236f1 Luiz Capitulino
    } else {
1129 8a9236f1 Luiz Capitulino
        runstate_set(state);
1130 594a45ce Kevin Wolf
        /* Make sure to return an error if the flush in a previous vm_stop()
1131 594a45ce Kevin Wolf
         * failed. */
1132 594a45ce Kevin Wolf
        return bdrv_flush_all();
1133 8a9236f1 Luiz Capitulino
    }
1134 8a9236f1 Luiz Capitulino
}
1135 8a9236f1 Luiz Capitulino
1136 9349b4f9 Andreas Färber
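/* Run one TCG execution slice for @env.  With -icount, the unexecuted
 * remainder of the previous budget is subtracted from qemu_icount, a new
 * budget bounded by the next vm_clock deadline is installed in the 16-bit
 * decrementer plus icount_extra, and whatever is still unexecuted is
 * subtracted again after cpu_exec() returns.  */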
static int tcg_cpu_exec(CPUArchState *env)
1137 296af7c9 Blue Swirl
{
1138 296af7c9 Blue Swirl
    int ret;
1139 296af7c9 Blue Swirl
#ifdef CONFIG_PROFILER
1140 296af7c9 Blue Swirl
    int64_t ti;
1141 296af7c9 Blue Swirl
#endif
1142 296af7c9 Blue Swirl
1143 296af7c9 Blue Swirl
#ifdef CONFIG_PROFILER
1144 296af7c9 Blue Swirl
    ti = profile_getclock();
1145 296af7c9 Blue Swirl
#endif
1146 296af7c9 Blue Swirl
    if (use_icount) {
1147 296af7c9 Blue Swirl
        int64_t count;
1148 296af7c9 Blue Swirl
        int decr;
1149 296af7c9 Blue Swirl
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
1150 296af7c9 Blue Swirl
        env->icount_decr.u16.low = 0;
1151 296af7c9 Blue Swirl
        env->icount_extra = 0;
1152 946fb27c Paolo Bonzini
        count = qemu_icount_round(qemu_clock_deadline(vm_clock));
1153 296af7c9 Blue Swirl
        qemu_icount += count;
1154 296af7c9 Blue Swirl
        decr = (count > 0xffff) ? 0xffff : count;
1155 296af7c9 Blue Swirl
        count -= decr;
1156 296af7c9 Blue Swirl
        env->icount_decr.u16.low = decr;
1157 296af7c9 Blue Swirl
        env->icount_extra = count;
1158 296af7c9 Blue Swirl
    }
1159 296af7c9 Blue Swirl
    ret = cpu_exec(env);
1160 296af7c9 Blue Swirl
#ifdef CONFIG_PROFILER
1161 296af7c9 Blue Swirl
    qemu_time += profile_getclock() - ti;
1162 296af7c9 Blue Swirl
#endif
1163 296af7c9 Blue Swirl
    if (use_icount) {
1164 296af7c9 Blue Swirl
        /* Fold pending instructions back into the
1165 296af7c9 Blue Swirl
           instruction counter, and clear the interrupt flag.  */
1166 296af7c9 Blue Swirl
        qemu_icount -= (env->icount_decr.u16.low
1167 296af7c9 Blue Swirl
                        + env->icount_extra);
1168 296af7c9 Blue Swirl
        env->icount_decr.u32 = 0;
1169 296af7c9 Blue Swirl
        env->icount_extra = 0;
1170 296af7c9 Blue Swirl
    }
1171 296af7c9 Blue Swirl
    return ret;
1172 296af7c9 Blue Swirl
}
1173 296af7c9 Blue Swirl
1174 bdb7ca67 Jan Kiszka
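/* Round-robin over all CPUs on the single TCG thread, resuming at
 * next_cpu, until every CPU has run or a debug, stop or exit request
 * breaks the loop.  */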
static void tcg_exec_all(void)
1175 296af7c9 Blue Swirl
{
1176 9a36085b Jan Kiszka
    int r;
1177 9a36085b Jan Kiszka
1178 ab33fcda Paolo Bonzini
    /* Account partial waits to the vm_clock.  */
1179 ab33fcda Paolo Bonzini
    qemu_clock_warp(vm_clock);
1180 ab33fcda Paolo Bonzini
1181 0ab07c62 Jan Kiszka
    if (next_cpu == NULL) {
1182 296af7c9 Blue Swirl
        next_cpu = first_cpu;
1183 0ab07c62 Jan Kiszka
    }
1184 c629a4bc Jan Kiszka
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
1185 182735ef Andreas Färber
        CPUState *cpu = next_cpu;
1186 182735ef Andreas Färber
        CPUArchState *env = cpu->env_ptr;
1187 296af7c9 Blue Swirl
1188 296af7c9 Blue Swirl
        qemu_clock_enable(vm_clock,
1189 345f4426 Jan Kiszka
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);
1190 296af7c9 Blue Swirl
1191 a1fcaa73 Andreas Färber
        if (cpu_can_run(cpu)) {
1192 bdb7ca67 Jan Kiszka
            r = tcg_cpu_exec(env);
1193 9a36085b Jan Kiszka
            if (r == EXCP_DEBUG) {
1194 91325046 Andreas Färber
                cpu_handle_guest_debug(cpu);
1195 3c638d06 Jan Kiszka
                break;
1196 3c638d06 Jan Kiszka
            }
1197 f324e766 Andreas Färber
        } else if (cpu->stop || cpu->stopped) {
1198 296af7c9 Blue Swirl
            break;
1199 296af7c9 Blue Swirl
        }
1200 296af7c9 Blue Swirl
    }
1201 c629a4bc Jan Kiszka
    exit_request = 0;
1202 296af7c9 Blue Swirl
}
1203 296af7c9 Blue Swirl
1204 296af7c9 Blue Swirl
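/* Assign each CPU its NUMA node according to the node_cpumask bitmaps,
 * as set up from the -numa options.  */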
void set_numa_modes(void)
1205 296af7c9 Blue Swirl
{
1206 1b1ed8dc Andreas Färber
    CPUState *cpu;
1207 296af7c9 Blue Swirl
    int i;
1208 296af7c9 Blue Swirl
1209 182735ef Andreas Färber
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1210 296af7c9 Blue Swirl
        for (i = 0; i < nb_numa_nodes; i++) {
1211 55e5c285 Andreas Färber
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
1212 1b1ed8dc Andreas Färber
                cpu->numa_node = i;
1213 296af7c9 Blue Swirl
            }
1214 296af7c9 Blue Swirl
        }
1215 296af7c9 Blue Swirl
    }
1216 296af7c9 Blue Swirl
}
1217 296af7c9 Blue Swirl
1218 9a78eead Stefan Weil
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
1219 262353cb Blue Swirl
{
1220 262353cb Blue Swirl
    /* XXX: implement xxx_cpu_list for targets that still lack it */
1221 e916cbf8 Peter Maydell
#if defined(cpu_list)
1222 e916cbf8 Peter Maydell
    cpu_list(f, cpu_fprintf);
1223 262353cb Blue Swirl
#endif
1224 262353cb Blue Swirl
}
1225 de0b36b6 Luiz Capitulino
1226 de0b36b6 Luiz Capitulino
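/* Build the list returned by the QMP "query-cpus" command: one entry per
 * CPU with its index, halted state, thread id and a target-specific
 * program counter.  */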
CpuInfoList *qmp_query_cpus(Error **errp)
1227 de0b36b6 Luiz Capitulino
{
1228 de0b36b6 Luiz Capitulino
    CpuInfoList *head = NULL, *cur_item = NULL;
1229 182735ef Andreas Färber
    CPUState *cpu;
1230 de0b36b6 Luiz Capitulino
1231 182735ef Andreas Färber
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1232 de0b36b6 Luiz Capitulino
        CpuInfoList *info;
1233 182735ef Andreas Färber
#if defined(TARGET_I386)
1234 182735ef Andreas Färber
        X86CPU *x86_cpu = X86_CPU(cpu);
1235 182735ef Andreas Färber
        CPUX86State *env = &x86_cpu->env;
1236 182735ef Andreas Färber
#elif defined(TARGET_PPC)
1237 182735ef Andreas Färber
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
1238 182735ef Andreas Färber
        CPUPPCState *env = &ppc_cpu->env;
1239 182735ef Andreas Färber
#elif defined(TARGET_SPARC)
1240 182735ef Andreas Färber
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
1241 182735ef Andreas Färber
        CPUSPARCState *env = &sparc_cpu->env;
1242 182735ef Andreas Färber
#elif defined(TARGET_MIPS)
1243 182735ef Andreas Färber
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
1244 182735ef Andreas Färber
        CPUMIPSState *env = &mips_cpu->env;
1245 182735ef Andreas Färber
#endif
1246 de0b36b6 Luiz Capitulino
1247 cb446eca Andreas Färber
        cpu_synchronize_state(cpu);
1248 de0b36b6 Luiz Capitulino
1249 de0b36b6 Luiz Capitulino
        info = g_malloc0(sizeof(*info));
1250 de0b36b6 Luiz Capitulino
        info->value = g_malloc0(sizeof(*info->value));
1251 55e5c285 Andreas Färber
        info->value->CPU = cpu->cpu_index;
1252 182735ef Andreas Färber
        info->value->current = (cpu == first_cpu);
1253 259186a7 Andreas Färber
        info->value->halted = cpu->halted;
1254 9f09e18a Andreas Färber
        info->value->thread_id = cpu->thread_id;
1255 de0b36b6 Luiz Capitulino
#if defined(TARGET_I386)
1256 de0b36b6 Luiz Capitulino
        info->value->has_pc = true;
1257 de0b36b6 Luiz Capitulino
        info->value->pc = env->eip + env->segs[R_CS].base;
1258 de0b36b6 Luiz Capitulino
#elif defined(TARGET_PPC)
1259 de0b36b6 Luiz Capitulino
        info->value->has_nip = true;
1260 de0b36b6 Luiz Capitulino
        info->value->nip = env->nip;
1261 de0b36b6 Luiz Capitulino
#elif defined(TARGET_SPARC)
1262 de0b36b6 Luiz Capitulino
        info->value->has_pc = true;
1263 de0b36b6 Luiz Capitulino
        info->value->pc = env->pc;
1264 de0b36b6 Luiz Capitulino
        info->value->has_npc = true;
1265 de0b36b6 Luiz Capitulino
        info->value->npc = env->npc;
1266 de0b36b6 Luiz Capitulino
#elif defined(TARGET_MIPS)
1267 de0b36b6 Luiz Capitulino
        info->value->has_PC = true;
1268 de0b36b6 Luiz Capitulino
        info->value->PC = env->active_tc.PC;
1269 de0b36b6 Luiz Capitulino
#endif
1270 de0b36b6 Luiz Capitulino
1271 de0b36b6 Luiz Capitulino
        /* XXX: waiting for the qapi to support GSList */
1272 de0b36b6 Luiz Capitulino
        if (!cur_item) {
1273 de0b36b6 Luiz Capitulino
            head = cur_item = info;
1274 de0b36b6 Luiz Capitulino
        } else {
1275 de0b36b6 Luiz Capitulino
            cur_item->next = info;
1276 de0b36b6 Luiz Capitulino
            cur_item = info;
1277 de0b36b6 Luiz Capitulino
        }
1278 de0b36b6 Luiz Capitulino
    }
1279 de0b36b6 Luiz Capitulino
1280 de0b36b6 Luiz Capitulino
    return head;
1281 de0b36b6 Luiz Capitulino
}
1282 0cfd6a9a Luiz Capitulino
1283 0cfd6a9a Luiz Capitulino
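/* QMP "memsave": dump @size bytes of guest virtual memory, as seen by the
 * selected CPU (default CPU 0), starting at @addr into @filename.  */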
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1284 0cfd6a9a Luiz Capitulino
                 bool has_cpu, int64_t cpu_index, Error **errp)
1285 0cfd6a9a Luiz Capitulino
{
1286 0cfd6a9a Luiz Capitulino
    FILE *f;
1287 0cfd6a9a Luiz Capitulino
    uint32_t l;
1288 9349b4f9 Andreas Färber
    CPUArchState *env;
1289 55e5c285 Andreas Färber
    CPUState *cpu;
1290 0cfd6a9a Luiz Capitulino
    uint8_t buf[1024];
1291 0cfd6a9a Luiz Capitulino
1292 0cfd6a9a Luiz Capitulino
    if (!has_cpu) {
1293 0cfd6a9a Luiz Capitulino
        cpu_index = 0;
1294 0cfd6a9a Luiz Capitulino
    }
1295 0cfd6a9a Luiz Capitulino
1296 151d1322 Andreas Färber
    cpu = qemu_get_cpu(cpu_index);
1297 151d1322 Andreas Färber
    if (cpu == NULL) {
1298 0cfd6a9a Luiz Capitulino
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1299 0cfd6a9a Luiz Capitulino
                  "a CPU number");
1300 0cfd6a9a Luiz Capitulino
        return;
1301 0cfd6a9a Luiz Capitulino
    }
1302 151d1322 Andreas Färber
    env = cpu->env_ptr;
1303 0cfd6a9a Luiz Capitulino
1304 0cfd6a9a Luiz Capitulino
    f = fopen(filename, "wb");
1305 0cfd6a9a Luiz Capitulino
    if (!f) {
1306 618da851 Luiz Capitulino
        error_setg_file_open(errp, errno, filename);
1307 0cfd6a9a Luiz Capitulino
        return;
1308 0cfd6a9a Luiz Capitulino
    }
1309 0cfd6a9a Luiz Capitulino
1310 0cfd6a9a Luiz Capitulino
    while (size != 0) {
1311 0cfd6a9a Luiz Capitulino
        l = sizeof(buf);
1312 0cfd6a9a Luiz Capitulino
        if (l > size) {
1313 0cfd6a9a Luiz Capitulino
            l = size;
        }
1314 0cfd6a9a Luiz Capitulino
        if (cpu_memory_rw_debug(env, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 " specified",
                       (uint64_t)addr);
            goto exit;
        }
1315 0cfd6a9a Luiz Capitulino
        if (fwrite(buf, 1, l, f) != l) {
1316 0cfd6a9a Luiz Capitulino
            error_set(errp, QERR_IO_ERROR);
1317 0cfd6a9a Luiz Capitulino
            goto exit;
1318 0cfd6a9a Luiz Capitulino
        }
1319 0cfd6a9a Luiz Capitulino
        addr += l;
1320 0cfd6a9a Luiz Capitulino
        size -= l;
1321 0cfd6a9a Luiz Capitulino
    }
1322 0cfd6a9a Luiz Capitulino
1323 0cfd6a9a Luiz Capitulino
exit:
1324 0cfd6a9a Luiz Capitulino
    fclose(f);
1325 0cfd6a9a Luiz Capitulino
}
1326 6d3962bf Luiz Capitulino
1327 6d3962bf Luiz Capitulino
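/* QMP "pmemsave": like memsave, but reads guest physical memory and
 * therefore needs no CPU index.  */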
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1328 6d3962bf Luiz Capitulino
                  Error **errp)
1329 6d3962bf Luiz Capitulino
{
1330 6d3962bf Luiz Capitulino
    FILE *f;
1331 6d3962bf Luiz Capitulino
    uint32_t l;
1332 6d3962bf Luiz Capitulino
    uint8_t buf[1024];
1333 6d3962bf Luiz Capitulino
1334 6d3962bf Luiz Capitulino
    f = fopen(filename, "wb");
1335 6d3962bf Luiz Capitulino
    if (!f) {
1336 618da851 Luiz Capitulino
        error_setg_file_open(errp, errno, filename);
1337 6d3962bf Luiz Capitulino
        return;
1338 6d3962bf Luiz Capitulino
    }
1339 6d3962bf Luiz Capitulino
1340 6d3962bf Luiz Capitulino
    while (size != 0) {
1341 6d3962bf Luiz Capitulino
        l = sizeof(buf);
1342 6d3962bf Luiz Capitulino
        if (l > size) {
1343 6d3962bf Luiz Capitulino
            l = size;
        }
1344 6d3962bf Luiz Capitulino
        cpu_physical_memory_rw(addr, buf, l, 0);
1345 6d3962bf Luiz Capitulino
        if (fwrite(buf, 1, l, f) != l) {
1346 6d3962bf Luiz Capitulino
            error_set(errp, QERR_IO_ERROR);
1347 6d3962bf Luiz Capitulino
            goto exit;
1348 6d3962bf Luiz Capitulino
        }
1349 6d3962bf Luiz Capitulino
        addr += l;
1350 6d3962bf Luiz Capitulino
        size -= l;
1351 6d3962bf Luiz Capitulino
    }
1352 6d3962bf Luiz Capitulino
1353 6d3962bf Luiz Capitulino
exit:
1354 6d3962bf Luiz Capitulino
    fclose(f);
1355 6d3962bf Luiz Capitulino
}
1356 ab49ab5c Luiz Capitulino
1357 ab49ab5c Luiz Capitulino
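/* QMP "inject-nmi": on x86, deliver an NMI to every CPU, through the APIC
 * when one is present or via cpu_interrupt() otherwise; other targets
 * report QERR_UNSUPPORTED.  */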
void qmp_inject_nmi(Error **errp)
1358 ab49ab5c Luiz Capitulino
{
1359 ab49ab5c Luiz Capitulino
#if defined(TARGET_I386)
1360 182735ef Andreas Färber
    CPUState *cs;
1361 182735ef Andreas Färber
1362 182735ef Andreas Färber
    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
1363 182735ef Andreas Färber
        X86CPU *cpu = X86_CPU(cs);
1364 182735ef Andreas Färber
        CPUX86State *env = &cpu->env;
1365 ab49ab5c Luiz Capitulino
1366 02c09195 Jan Kiszka
        if (!env->apic_state) {
1367 182735ef Andreas Färber
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
1368 02c09195 Jan Kiszka
        } else {
1369 02c09195 Jan Kiszka
            apic_deliver_nmi(env->apic_state);
1370 02c09195 Jan Kiszka
        }
1371 ab49ab5c Luiz Capitulino
    }
1372 ab49ab5c Luiz Capitulino
#else
1373 ab49ab5c Luiz Capitulino
    error_set(errp, QERR_UNSUPPORTED);
1374 ab49ab5c Luiz Capitulino
#endif
1375 ab49ab5c Luiz Capitulino
}