root / cpus.c @ 34b5d2c6

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

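/* A vCPU thread counts as idle only when it has nothing to do: it has not
 * been asked to stop, has no queued work, and is either stopped or halted
 * with no pending work while the halt is not handled in-kernel by KVM.  */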
static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

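/* Offsets that let cpu_get_ticks() and cpu_get_clock() freeze while the VM
 * is stopped: while cpu_ticks_enabled is 0 the saved offsets are returned
 * unchanged, so guest tick/clock values do not advance across a stop.  */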
typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that.  */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

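/* Timer callback for the icount "warp": while the vCPUs were idle,
 * QEMU_CLOCK_VIRTUAL could not advance by executing instructions, so add the
 * real time elapsed since vm_clock_warp_start to qemu_icount_bias (capped in
 * adaptive mode) and wake up any virtual timers that became due.  */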
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        }
    }
    vm_clock_warp_start = -1;
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL)) {
        timer_del(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    /* We want to use the earliest deadline from ALL vm_clocks */
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

    /* Maintain prior (possibly buggy) behaviour where if no deadline
     * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
     * INT32_MAX nanoseconds ahead, we still use INT32_MAX
     * nanoseconds.
     */
    if ((deadline < 0) || (deadline > INT32_MAX)) {
        deadline = INT32_MAX;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time, (related to the time left until the next
         * event) has passed. The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        timer_mod(icount_warp_timer, vm_clock_warp_start + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

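/* Parse the icount option: a number selects a fixed instructions-per-tick
 * shift (use_icount == 1), while "auto" enables the adaptive mode
 * (use_icount == 2) that is re-tuned by the rt/vm adjustment timers.  */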
void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                          icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                        icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                        icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

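/* Stop the VM: freeze the tick counters, pause every vCPU, enter the new
 * run state and notify listeners, then drain and flush block devices.
 * Returns the result of bdrv_flush_all().  */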
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

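/* Drain any SIG_IPI/SIGBUS that is pending for this thread, forwarding
 * SIGBUS (e.g. hardware memory errors) to KVM.  */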
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

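/* Run func(data) on the given vCPU's thread.  If called from that thread the
 * function runs immediately; otherwise a stack-allocated work item is queued,
 * the vCPU is kicked, and the caller sleeps on qemu_work_cond until done.  */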
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

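/* Asynchronous variant of run_on_cpu(): the work item is heap-allocated and
 * freed by flush_queued_work(), and the caller does not wait for it.  */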
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

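/* Per-vCPU thread used with KVM: create the kernel vCPU, set up its signal
 * mask, then loop running kvm_cpu_exec() and servicing stop, debug and
 * queued-work requests between runs.  */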
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

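/* Single thread shared by all TCG vCPUs: waits for the machine to start,
 * then loops between tcg_exec_all() and qemu_tcg_wait_io_event().  */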
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

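/* Force the vCPU thread out of guest execution: on POSIX hosts by sending
 * SIG_IPI, on Windows by suspending the thread, raising the exit request via
 * cpu_signal(), and resuming it.  */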
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

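/* Acquire the global mutex from an I/O thread.  With TCG the lock can be
 * held by the vCPU thread for long stretches, so advertise the request and
 * kick first_cpu to make the vCPU loop drop the lock.  */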
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

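/* Ask every vCPU to stop and wait until all of them have done so.  When
 * called from a vCPU thread the current CPU is stopped directly, and without
 * KVM the remaining CPUs can simply be marked as stopped.  */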
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

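/* Create the execution context for a new vCPU: a dedicated thread per vCPU
 * for KVM, the single shared thread for TCG, or a dummy thread when neither
 * accelerator is in use (e.g. qtest).  */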
void qemu_init_vcpu(CPUState *cpu)
1096 296af7c9 Blue Swirl
{
1097 ce3960eb Andreas Färber
    cpu->nr_cores = smp_cores;
1098 ce3960eb Andreas Färber
    cpu->nr_threads = smp_threads;
1099 f324e766 Andreas Färber
    cpu->stopped = true;
1100 0ab07c62 Jan Kiszka
    if (kvm_enabled()) {
1101 48a106bd Andreas Färber
        qemu_kvm_start_vcpu(cpu);
1102 c7f0f3b1 Anthony Liguori
    } else if (tcg_enabled()) {
1103 e5ab30a2 Andreas Färber
        qemu_tcg_init_vcpu(cpu);
1104 c7f0f3b1 Anthony Liguori
    } else {
1105 10a9021d Andreas Färber
        qemu_dummy_start_vcpu(cpu);
1106 0ab07c62 Jan Kiszka
    }
1107 296af7c9 Blue Swirl
}
1108 296af7c9 Blue Swirl
1109 b4a3d965 Jan Kiszka
void cpu_stop_current(void)
1110 296af7c9 Blue Swirl
{
1111 4917cf44 Andreas Färber
    if (current_cpu) {
1112 4917cf44 Andreas Färber
        current_cpu->stop = false;
1113 4917cf44 Andreas Färber
        current_cpu->stopped = true;
1114 4917cf44 Andreas Färber
        cpu_exit(current_cpu);
1115 67bb172f Paolo Bonzini
        qemu_cond_signal(&qemu_pause_cond);
1116 b4a3d965 Jan Kiszka
    }
1117 296af7c9 Blue Swirl
}
1118 296af7c9 Blue Swirl
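/* Stop the VM.  When called from a vCPU thread the actual stop is deferred:
 * a vmstop request is queued for the main loop and only the current CPU is
 * halted here. */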
1119 56983463 Kevin Wolf
int vm_stop(RunState state)
1120 296af7c9 Blue Swirl
{
1121 aa723c23 Juan Quintela
    if (qemu_in_vcpu_thread()) {
1122 1dfb4dd9 Luiz Capitulino
        qemu_system_vmstop_request(state);
1123 296af7c9 Blue Swirl
        /*
1124 296af7c9 Blue Swirl
         * FIXME: should not return to device code when
1125 296af7c9 Blue Swirl
         * vm_stop() has been requested.
1126 296af7c9 Blue Swirl
         */
1127 b4a3d965 Jan Kiszka
        cpu_stop_current();
1128 56983463 Kevin Wolf
        return 0;
1129 296af7c9 Blue Swirl
    }
1130 56983463 Kevin Wolf
1131 56983463 Kevin Wolf
    return do_vm_stop(state);
1132 296af7c9 Blue Swirl
}
1133 296af7c9 Blue Swirl
1134 8a9236f1 Luiz Capitulino
/* Does a state transition even if the VM is already stopped; the
1135 8a9236f1 Luiz Capitulino
   current state is forgotten forever. */
1136 56983463 Kevin Wolf
int vm_stop_force_state(RunState state)
1137 8a9236f1 Luiz Capitulino
{
1138 8a9236f1 Luiz Capitulino
    if (runstate_is_running()) {
1139 56983463 Kevin Wolf
        return vm_stop(state);
1140 8a9236f1 Luiz Capitulino
    } else {
1141 8a9236f1 Luiz Capitulino
        runstate_set(state);
1142 594a45ce Kevin Wolf
        /* Make sure to return an error if the flush in a previous vm_stop()
1143 594a45ce Kevin Wolf
         * failed. */
1144 594a45ce Kevin Wolf
        return bdrv_flush_all();
1145 8a9236f1 Luiz Capitulino
    }
1146 8a9236f1 Luiz Capitulino
}
1147 8a9236f1 Luiz Capitulino
1148 9349b4f9 Andreas Färber
static int tcg_cpu_exec(CPUArchState *env)
1149 296af7c9 Blue Swirl
{
1150 296af7c9 Blue Swirl
    int ret;
1151 296af7c9 Blue Swirl
#ifdef CONFIG_PROFILER
1152 296af7c9 Blue Swirl
    int64_t ti;
1153 296af7c9 Blue Swirl
#endif
1154 296af7c9 Blue Swirl
1155 296af7c9 Blue Swirl
#ifdef CONFIG_PROFILER
1156 296af7c9 Blue Swirl
    ti = profile_getclock();
1157 296af7c9 Blue Swirl
#endif
1158 296af7c9 Blue Swirl
    if (use_icount) {
1159 296af7c9 Blue Swirl
        int64_t count;
1160 ac70aafc Alex Bligh
        int64_t deadline;
1161 296af7c9 Blue Swirl
        int decr;
1162 296af7c9 Blue Swirl
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
1163 296af7c9 Blue Swirl
        env->icount_decr.u16.low = 0;
1164 296af7c9 Blue Swirl
        env->icount_extra = 0;
1165 40daca54 Alex Bligh
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1166 ac70aafc Alex Bligh
1167 ac70aafc Alex Bligh
        /* Maintain the prior (possibly buggy) behaviour: if no deadline is
1168 40daca54 Alex Bligh
         * set (because there is no QEMU_CLOCK_VIRTUAL timer) or it is more
1169 ac70aafc Alex Bligh
         * than INT32_MAX nanoseconds ahead, still cap it at INT32_MAX
1170 ac70aafc Alex Bligh
         * nanoseconds.
1171 ac70aafc Alex Bligh
         */
1172 ac70aafc Alex Bligh
        if ((deadline < 0) || (deadline > INT32_MAX)) {
1173 ac70aafc Alex Bligh
            deadline = INT32_MAX;
1174 ac70aafc Alex Bligh
        }
1175 ac70aafc Alex Bligh
1176 ac70aafc Alex Bligh
        count = qemu_icount_round(deadline);
1177 296af7c9 Blue Swirl
        qemu_icount += count;
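        /* icount_decr.u16.low is only 16 bits wide, so execute at most 0xffff
         * instructions in this slice and carry the remainder in icount_extra. */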
1178 296af7c9 Blue Swirl
        decr = (count > 0xffff) ? 0xffff : count;
1179 296af7c9 Blue Swirl
        count -= decr;
1180 296af7c9 Blue Swirl
        env->icount_decr.u16.low = decr;
1181 296af7c9 Blue Swirl
        env->icount_extra = count;
1182 296af7c9 Blue Swirl
    }
1183 296af7c9 Blue Swirl
    ret = cpu_exec(env);
1184 296af7c9 Blue Swirl
#ifdef CONFIG_PROFILER
1185 296af7c9 Blue Swirl
    qemu_time += profile_getclock() - ti;
1186 296af7c9 Blue Swirl
#endif
1187 296af7c9 Blue Swirl
    if (use_icount) {
1188 296af7c9 Blue Swirl
        /* Fold pending instructions back into the
1189 296af7c9 Blue Swirl
           instruction counter, and clear the interrupt flag.  */
1190 296af7c9 Blue Swirl
        qemu_icount -= (env->icount_decr.u16.low
1191 296af7c9 Blue Swirl
                        + env->icount_extra);
1192 296af7c9 Blue Swirl
        env->icount_decr.u32 = 0;
1193 296af7c9 Blue Swirl
        env->icount_extra = 0;
1194 296af7c9 Blue Swirl
    }
1195 296af7c9 Blue Swirl
    return ret;
1196 296af7c9 Blue Swirl
}
1197 296af7c9 Blue Swirl
1198 bdb7ca67 Jan Kiszka
static void tcg_exec_all(void)
1199 296af7c9 Blue Swirl
{
1200 9a36085b Jan Kiszka
    int r;
1201 9a36085b Jan Kiszka
1202 40daca54 Alex Bligh
    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
1203 40daca54 Alex Bligh
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
1204 ab33fcda Paolo Bonzini
1205 0ab07c62 Jan Kiszka
    if (next_cpu == NULL) {
1206 296af7c9 Blue Swirl
        next_cpu = first_cpu;
1207 0ab07c62 Jan Kiszka
    }
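    /* Round-robin over all vCPUs, resuming where the previous call left off;
     * the loop ends early on an exit request, a debug exception, or a CPU
     * that has been told to stop. */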
1208 bdc44640 Andreas Färber
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
1209 182735ef Andreas Färber
        CPUState *cpu = next_cpu;
1210 182735ef Andreas Färber
        CPUArchState *env = cpu->env_ptr;
1211 296af7c9 Blue Swirl
1212 40daca54 Alex Bligh
        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1213 ed2803da Andreas Färber
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1214 296af7c9 Blue Swirl
1215 a1fcaa73 Andreas Färber
        if (cpu_can_run(cpu)) {
1216 bdb7ca67 Jan Kiszka
            r = tcg_cpu_exec(env);
1217 9a36085b Jan Kiszka
            if (r == EXCP_DEBUG) {
1218 91325046 Andreas Färber
                cpu_handle_guest_debug(cpu);
1219 3c638d06 Jan Kiszka
                break;
1220 3c638d06 Jan Kiszka
            }
1221 f324e766 Andreas Färber
        } else if (cpu->stop || cpu->stopped) {
1222 296af7c9 Blue Swirl
            break;
1223 296af7c9 Blue Swirl
        }
1224 296af7c9 Blue Swirl
    }
1225 c629a4bc Jan Kiszka
    exit_request = 0;
1226 296af7c9 Blue Swirl
}
1227 296af7c9 Blue Swirl
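/* Propagate the NUMA node assignments (node_cpumask, typically built from
 * the -numa command line options) into each vCPU's numa_node field. */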
1228 296af7c9 Blue Swirl
void set_numa_modes(void)
1229 296af7c9 Blue Swirl
{
1230 1b1ed8dc Andreas Färber
    CPUState *cpu;
1231 296af7c9 Blue Swirl
    int i;
1232 296af7c9 Blue Swirl
1233 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
1234 296af7c9 Blue Swirl
        for (i = 0; i < nb_numa_nodes; i++) {
1235 55e5c285 Andreas Färber
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
1236 1b1ed8dc Andreas Färber
                cpu->numa_node = i;
1237 296af7c9 Blue Swirl
            }
1238 296af7c9 Blue Swirl
        }
1239 296af7c9 Blue Swirl
    }
1240 296af7c9 Blue Swirl
}
1241 296af7c9 Blue Swirl
1242 9a78eead Stefan Weil
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
1243 262353cb Blue Swirl
{
1244 262353cb Blue Swirl
    /* XXX: implement xxx_cpu_list for targets that still lack it */
1245 e916cbf8 Peter Maydell
#if defined(cpu_list)
1246 e916cbf8 Peter Maydell
    cpu_list(f, cpu_fprintf);
1247 262353cb Blue Swirl
#endif
1248 262353cb Blue Swirl
}
1249 de0b36b6 Luiz Capitulino
1250 de0b36b6 Luiz Capitulino
CpuInfoList *qmp_query_cpus(Error **errp)
1251 de0b36b6 Luiz Capitulino
{
1252 de0b36b6 Luiz Capitulino
    CpuInfoList *head = NULL, *cur_item = NULL;
1253 182735ef Andreas Färber
    CPUState *cpu;
1254 de0b36b6 Luiz Capitulino
1255 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
1256 de0b36b6 Luiz Capitulino
        CpuInfoList *info;
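        /* Pick up the target-specific register state so the program-counter
         * style fields below can be reported for this target. */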
1257 182735ef Andreas Färber
#if defined(TARGET_I386)
1258 182735ef Andreas Färber
        X86CPU *x86_cpu = X86_CPU(cpu);
1259 182735ef Andreas Färber
        CPUX86State *env = &x86_cpu->env;
1260 182735ef Andreas Färber
#elif defined(TARGET_PPC)
1261 182735ef Andreas Färber
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
1262 182735ef Andreas Färber
        CPUPPCState *env = &ppc_cpu->env;
1263 182735ef Andreas Färber
#elif defined(TARGET_SPARC)
1264 182735ef Andreas Färber
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
1265 182735ef Andreas Färber
        CPUSPARCState *env = &sparc_cpu->env;
1266 182735ef Andreas Färber
#elif defined(TARGET_MIPS)
1267 182735ef Andreas Färber
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
1268 182735ef Andreas Färber
        CPUMIPSState *env = &mips_cpu->env;
1269 182735ef Andreas Färber
#endif
1270 de0b36b6 Luiz Capitulino
1271 cb446eca Andreas Färber
        cpu_synchronize_state(cpu);
1272 de0b36b6 Luiz Capitulino
1273 de0b36b6 Luiz Capitulino
        info = g_malloc0(sizeof(*info));
1274 de0b36b6 Luiz Capitulino
        info->value = g_malloc0(sizeof(*info->value));
1275 55e5c285 Andreas Färber
        info->value->CPU = cpu->cpu_index;
1276 182735ef Andreas Färber
        info->value->current = (cpu == first_cpu);
1277 259186a7 Andreas Färber
        info->value->halted = cpu->halted;
1278 9f09e18a Andreas Färber
        info->value->thread_id = cpu->thread_id;
1279 de0b36b6 Luiz Capitulino
#if defined(TARGET_I386)
1280 de0b36b6 Luiz Capitulino
        info->value->has_pc = true;
1281 de0b36b6 Luiz Capitulino
        info->value->pc = env->eip + env->segs[R_CS].base;
1282 de0b36b6 Luiz Capitulino
#elif defined(TARGET_PPC)
1283 de0b36b6 Luiz Capitulino
        info->value->has_nip = true;
1284 de0b36b6 Luiz Capitulino
        info->value->nip = env->nip;
1285 de0b36b6 Luiz Capitulino
#elif defined(TARGET_SPARC)
1286 de0b36b6 Luiz Capitulino
        info->value->has_pc = true;
1287 de0b36b6 Luiz Capitulino
        info->value->pc = env->pc;
1288 de0b36b6 Luiz Capitulino
        info->value->has_npc = true;
1289 de0b36b6 Luiz Capitulino
        info->value->npc = env->npc;
1290 de0b36b6 Luiz Capitulino
#elif defined(TARGET_MIPS)
1291 de0b36b6 Luiz Capitulino
        info->value->has_PC = true;
1292 de0b36b6 Luiz Capitulino
        info->value->PC = env->active_tc.PC;
1293 de0b36b6 Luiz Capitulino
#endif
1294 de0b36b6 Luiz Capitulino
1295 de0b36b6 Luiz Capitulino
        /* XXX: waiting for the qapi to support GSList */
1296 de0b36b6 Luiz Capitulino
        if (!cur_item) {
1297 de0b36b6 Luiz Capitulino
            head = cur_item = info;
1298 de0b36b6 Luiz Capitulino
        } else {
1299 de0b36b6 Luiz Capitulino
            cur_item->next = info;
1300 de0b36b6 Luiz Capitulino
            cur_item = info;
1301 de0b36b6 Luiz Capitulino
        }
1302 de0b36b6 Luiz Capitulino
    }
1303 de0b36b6 Luiz Capitulino
1304 de0b36b6 Luiz Capitulino
    return head;
1305 de0b36b6 Luiz Capitulino
}
1306 0cfd6a9a Luiz Capitulino
1307 0cfd6a9a Luiz Capitulino
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1308 0cfd6a9a Luiz Capitulino
                 bool has_cpu, int64_t cpu_index, Error **errp)
1309 0cfd6a9a Luiz Capitulino
{
1310 0cfd6a9a Luiz Capitulino
    FILE *f;
1311 0cfd6a9a Luiz Capitulino
    uint32_t l;
1312 55e5c285 Andreas Färber
    CPUState *cpu;
1313 0cfd6a9a Luiz Capitulino
    uint8_t buf[1024];
1314 0cfd6a9a Luiz Capitulino
1315 0cfd6a9a Luiz Capitulino
    if (!has_cpu) {
1316 0cfd6a9a Luiz Capitulino
        cpu_index = 0;
1317 0cfd6a9a Luiz Capitulino
    }
1318 0cfd6a9a Luiz Capitulino
1319 151d1322 Andreas Färber
    cpu = qemu_get_cpu(cpu_index);
1320 151d1322 Andreas Färber
    if (cpu == NULL) {
1321 0cfd6a9a Luiz Capitulino
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1322 0cfd6a9a Luiz Capitulino
                  "a CPU number");
1323 0cfd6a9a Luiz Capitulino
        return;
1324 0cfd6a9a Luiz Capitulino
    }
1325 0cfd6a9a Luiz Capitulino
1326 0cfd6a9a Luiz Capitulino
    f = fopen(filename, "wb");
1327 0cfd6a9a Luiz Capitulino
    if (!f) {
1328 618da851 Luiz Capitulino
        error_setg_file_open(errp, errno, filename);
1329 0cfd6a9a Luiz Capitulino
        return;
1330 0cfd6a9a Luiz Capitulino
    }
1331 0cfd6a9a Luiz Capitulino
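    /* Dump the requested range of guest-virtual memory in buf-sized chunks,
     * translating addresses through the selected CPU's debug accessor. */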
1332 0cfd6a9a Luiz Capitulino
    while (size != 0) {
1333 0cfd6a9a Luiz Capitulino
        l = sizeof(buf);
1334 0cfd6a9a Luiz Capitulino
        if (l > size) {
1335 0cfd6a9a Luiz Capitulino
            l = size;
        }
1336 f17ec444 Andreas Färber
        cpu_memory_rw_debug(cpu, addr, buf, l, 0);
1337 0cfd6a9a Luiz Capitulino
        if (fwrite(buf, 1, l, f) != l) {
1338 0cfd6a9a Luiz Capitulino
            error_set(errp, QERR_IO_ERROR);
1339 0cfd6a9a Luiz Capitulino
            goto exit;
1340 0cfd6a9a Luiz Capitulino
        }
1341 0cfd6a9a Luiz Capitulino
        addr += l;
1342 0cfd6a9a Luiz Capitulino
        size -= l;
1343 0cfd6a9a Luiz Capitulino
    }
1344 0cfd6a9a Luiz Capitulino
1345 0cfd6a9a Luiz Capitulino
exit:
1346 0cfd6a9a Luiz Capitulino
    fclose(f);
1347 0cfd6a9a Luiz Capitulino
}
1348 6d3962bf Luiz Capitulino
1349 6d3962bf Luiz Capitulino
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1350 6d3962bf Luiz Capitulino
                  Error **errp)
1351 6d3962bf Luiz Capitulino
{
1352 6d3962bf Luiz Capitulino
    FILE *f;
1353 6d3962bf Luiz Capitulino
    uint32_t l;
1354 6d3962bf Luiz Capitulino
    uint8_t buf[1024];
1355 6d3962bf Luiz Capitulino
1356 6d3962bf Luiz Capitulino
    f = fopen(filename, "wb");
1357 6d3962bf Luiz Capitulino
    if (!f) {
1358 618da851 Luiz Capitulino
        error_setg_file_open(errp, errno, filename);
1359 6d3962bf Luiz Capitulino
        return;
1360 6d3962bf Luiz Capitulino
    }
1361 6d3962bf Luiz Capitulino
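    /* Same chunked loop as qmp_memsave(), but reading guest-physical
     * addresses directly. */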
1362 6d3962bf Luiz Capitulino
    while (size != 0) {
1363 6d3962bf Luiz Capitulino
        l = sizeof(buf);
1364 6d3962bf Luiz Capitulino
        if (l > size) {
1364 6d3962bf Luiz Capitulino
            l = size;
        }
1366 6d3962bf Luiz Capitulino
        cpu_physical_memory_rw(addr, buf, l, 0);
1367 6d3962bf Luiz Capitulino
        if (fwrite(buf, 1, l, f) != l) {
1368 6d3962bf Luiz Capitulino
            error_set(errp, QERR_IO_ERROR);
1369 6d3962bf Luiz Capitulino
            goto exit;
1370 6d3962bf Luiz Capitulino
        }
1371 6d3962bf Luiz Capitulino
        addr += l;
1372 6d3962bf Luiz Capitulino
        size -= l;
1373 6d3962bf Luiz Capitulino
    }
1374 6d3962bf Luiz Capitulino
1375 6d3962bf Luiz Capitulino
exit:
1376 6d3962bf Luiz Capitulino
    fclose(f);
1377 6d3962bf Luiz Capitulino
}
1378 ab49ab5c Luiz Capitulino
1379 ab49ab5c Luiz Capitulino
void qmp_inject_nmi(Error **errp)
1380 ab49ab5c Luiz Capitulino
{
1381 ab49ab5c Luiz Capitulino
#if defined(TARGET_I386)
1382 182735ef Andreas Färber
    CPUState *cs;
1383 182735ef Andreas Färber
1384 bdc44640 Andreas Färber
    CPU_FOREACH(cs) {
1385 182735ef Andreas Färber
        X86CPU *cpu = X86_CPU(cs);
1386 182735ef Andreas Färber
        CPUX86State *env = &cpu->env;
1387 ab49ab5c Luiz Capitulino
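        /* Deliver the NMI through the local APIC when one is present;
         * otherwise raise it directly on the CPU. */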
1388 02c09195 Jan Kiszka
        if (!env->apic_state) {
1389 182735ef Andreas Färber
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
1390 02c09195 Jan Kiszka
        } else {
1391 02c09195 Jan Kiszka
            apic_deliver_nmi(env->apic_state);
1392 02c09195 Jan Kiszka
        }
1393 ab49ab5c Luiz Capitulino
    }
1394 7f7f9752 Eugene (jno) Dvurechenski
#elif defined(TARGET_S390X)
1395 7f7f9752 Eugene (jno) Dvurechenski
    CPUState *cs;
1396 7f7f9752 Eugene (jno) Dvurechenski
    S390CPU *cpu;
1397 7f7f9752 Eugene (jno) Dvurechenski
1398 bdc44640 Andreas Färber
    CPU_FOREACH(cs) {
1399 7f7f9752 Eugene (jno) Dvurechenski
        cpu = S390_CPU(cs);
1400 7f7f9752 Eugene (jno) Dvurechenski
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
1401 7f7f9752 Eugene (jno) Dvurechenski
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
1402 7f7f9752 Eugene (jno) Dvurechenski
                error_set(errp, QERR_UNSUPPORTED);
1403 7f7f9752 Eugene (jno) Dvurechenski
                return;
1404 7f7f9752 Eugene (jno) Dvurechenski
            }
1405 7f7f9752 Eugene (jno) Dvurechenski
            break;
1406 7f7f9752 Eugene (jno) Dvurechenski
        }
1407 7f7f9752 Eugene (jno) Dvurechenski
    }
1408 ab49ab5c Luiz Capitulino
#else
1409 ab49ab5c Luiz Capitulino
    error_set(errp, QERR_UNSUPPORTED);
1410 ab49ab5c Luiz Capitulino
#endif
1411 ab49ab5c Luiz Capitulino
}