/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

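/* Return the host monotonic clock plus the saved offset.  Called either with
 * the vm_clock_seqlock write side held or from the seqlock read loop in
 * cpu_get_clock() below.
 */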
static int64_t cpu_get_clock_locked(void)
{
    int64_t ti;

    if (!timers_state.cpu_ticks_enabled) {
        ti = timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        ti += timers_state.cpu_clock_offset;
    }

    return ti;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the real thing protected by seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks(): the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the real thing protected by seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

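/* Periodically re-tune icount_time_shift so that virtual time tracks real
 * time, folding the change into qemu_icount_bias so that cpu_get_icount()
 * remains continuous across the adjustment.
 */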
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    cur_time = cpu_get_clock();
    cur_icount = cpu_get_icount();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
    icount_adjust();
}

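/* Convert a virtual-clock interval into an instruction count, rounding up
 * (the inverse of the icount_time_shift scaling used by cpu_get_icount()).
 */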
static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

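/* Runs when the warp timer fires (or directly from qemu_clock_warp): credit
 * the real time that elapsed while the CPUs were idle to qemu_icount_bias,
 * capped in adaptive mode so the virtual clock does not overtake real time.
 */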
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = cpu_get_icount();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL)) {
        timer_del(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    /* We want to use the earliest deadline from ALL vm_clocks */
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

    /* Maintain prior (possibly buggy) behaviour where if no deadline
     * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
     * INT32_MAX nanoseconds ahead, we still use INT32_MAX
     * nanoseconds.
     */
    if ((deadline < 0) || (deadline > INT32_MAX)) {
        deadline = INT32_MAX;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This ensures that the warps are not visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        timer_mod(icount_warp_timer, vm_clock_warp_start + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                          icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                        icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                        icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

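/* Stop the VM: pause all vCPUs, stop the cpu ticks, switch the run state and
 * notify listeners, then drain and flush all block devices.
 */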
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

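/* Run func(data) on cpu's thread and wait for it to complete.  If we already
 * are on that thread, just call it directly; the work item can live on the
 * stack because we block until it is done.
 */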
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

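/* Queue func(data) to run on cpu's thread without waiting.  The work item is
 * heap-allocated and marked free, so flush_queued_work() releases it once the
 * function has run.
 */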
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

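/* Per-CPU housekeeping done with the BQL held: acknowledge a pending stop
 * request, run any queued work items, and clear the kick flag.
 */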
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

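/* Entry point of the per-vCPU KVM thread: create the vcpu in the kernel,
 * set up its signal mask, then loop between kvm_cpu_exec() and waiting for
 * I/O events.
 */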
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

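/* Entry point of the single TCG thread: it marks every vCPU as created, then
 * repeatedly runs tcg_exec_all() to multiplex all vCPUs on this one host
 * thread, waiting for I/O events in between.
 */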
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

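/* Force the vCPU thread out of its execution loop: send SIG_IPI on POSIX
 * hosts; on Windows, suspend the thread, call cpu_signal() on its behalf and
 * resume it.
 */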
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

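/* Take the global mutex (BQL).  Under TCG, flag that the iothread wants the
 * lock and kick the vCPU thread so the mutex is handed over promptly.
 */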
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

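/* Ask every vCPU to stop and wait until all of them report themselves as
 * stopped.  When called from a vCPU thread without KVM, the remaining CPUs
 * are simply marked stopped instead of being waited for.
 */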
1042 296af7c9 Blue Swirl
void pause_all_vcpus(void)
1043 296af7c9 Blue Swirl
{
1044 bdc44640 Andreas Färber
    CPUState *cpu;
1045 296af7c9 Blue Swirl
1046 40daca54 Alex Bligh
    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
1047 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
1048 182735ef Andreas Färber
        cpu->stop = true;
1049 182735ef Andreas Färber
        qemu_cpu_kick(cpu);
1050 296af7c9 Blue Swirl
    }
1051 296af7c9 Blue Swirl
1052 aa723c23 Juan Quintela
    if (qemu_in_vcpu_thread()) {
1053 d798e974 Jan Kiszka
        cpu_stop_current();
1054 d798e974 Jan Kiszka
        if (!kvm_enabled()) {
1055 bdc44640 Andreas Färber
            CPU_FOREACH(cpu) {
1056 182735ef Andreas Färber
                cpu->stop = false;
1057 182735ef Andreas Färber
                cpu->stopped = true;
1058 d798e974 Jan Kiszka
            }
1059 d798e974 Jan Kiszka
            return;
1060 d798e974 Jan Kiszka
        }
1061 d798e974 Jan Kiszka
    }
1062 d798e974 Jan Kiszka
1063 296af7c9 Blue Swirl
    while (!all_vcpus_paused()) {
1064 be7d6c57 Paolo Bonzini
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
1065 bdc44640 Andreas Färber
        CPU_FOREACH(cpu) {
1066 182735ef Andreas Färber
            qemu_cpu_kick(cpu);
1067 296af7c9 Blue Swirl
        }
1068 296af7c9 Blue Swirl
    }
1069 296af7c9 Blue Swirl
}
1070 296af7c9 Blue Swirl
1071 2993683b Igor Mammedov
void cpu_resume(CPUState *cpu)
1072 2993683b Igor Mammedov
{
1073 2993683b Igor Mammedov
    cpu->stop = false;
1074 2993683b Igor Mammedov
    cpu->stopped = false;
1075 2993683b Igor Mammedov
    qemu_cpu_kick(cpu);
1076 2993683b Igor Mammedov
}
1077 2993683b Igor Mammedov
1078 296af7c9 Blue Swirl
void resume_all_vcpus(void)
1079 296af7c9 Blue Swirl
{
1080 bdc44640 Andreas Färber
    CPUState *cpu;
1081 296af7c9 Blue Swirl
1082 40daca54 Alex Bligh
    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
1083 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
1084 182735ef Andreas Färber
        cpu_resume(cpu);
1085 296af7c9 Blue Swirl
    }
1086 296af7c9 Blue Swirl
}
1087 296af7c9 Blue Swirl
1088 e5ab30a2 Andreas Färber
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

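/* KVM gives each vCPU its own thread running qemu_kvm_cpu_thread_fn; wait
 * until that thread has signalled cpu->created before returning.
 */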
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

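/* Used when neither KVM nor TCG is enabled (see qemu_init_vcpu below):
 * the vCPU gets a thread that never executes guest code.
 */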
static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

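/* Common vCPU bring-up: record the SMP topology, mark the CPU as initially
 * stopped and dispatch to the KVM, TCG or dummy start routine.
 */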
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

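/* Mark the currently executing vCPU as stopped, force it out of its
 * execution loop with cpu_exit() and wake any waiter on qemu_pause_cond.
 */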
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

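/* Stop the VM and transition to the given run state.  From a vCPU thread the
 * stop is only requested (to be completed outside the vCPU thread) and the
 * current CPU is halted; otherwise do_vm_stop() runs synchronously.
 */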
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* Does a state transition even if the VM is already stopped; the
 * current state is forgotten forever. */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

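/* Run one TCG execution slice for a vCPU.  With icount enabled the
 * instruction budget is derived from the next QEMU_CLOCK_VIRTUAL deadline
 * (clamped to INT32_MAX ns): the low 16 bits go into icount_decr.u16.low,
 * the remainder into icount_extra.  After cpu_exec() returns, any
 * unexecuted part of the budget is folded back into qemu_icount.
 */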
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

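/* Round-robin scheduler for the single TCG thread: resume iteration at
 * next_cpu and give each runnable vCPU one tcg_cpu_exec() slice until an
 * exit is requested, a debug exception is hit, or a CPU is (being) stopped.
 */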
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                cpu->numa_node = i;
            }
        }
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

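/* QMP 'query-cpus': build one CpuInfoList entry per vCPU, synchronizing the
 * register state first so the reported program counter is current.  The
 * per-target #ifdefs pick which PC-like fields are filled in.
 */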
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

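/* QMP 'memsave': dump guest virtual memory, as seen by the selected vCPU,
 * to a file in 1 KiB chunks via cpu_memory_rw_debug().
 */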
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_memory_rw_debug(cpu, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

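/* QMP 'pmemsave': like memsave, but reads guest physical memory with
 * cpu_physical_memory_rw() and needs no CPU argument.
 */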
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

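/* QMP 'inject-nmi': on x86 deliver an NMI to every vCPU, through the APIC
 * when one is present; on s390x restart the CPU currently selected in the
 * monitor; other targets report QERR_UNSUPPORTED.
 */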
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);
        CPUX86State *env = &cpu->env;

        if (!env->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    CPU_FOREACH(cs) {
        cpu = S390_CPU(cs);
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}