Statistics
| Branch: | Revision:

root / cpus.c @ feature-archipelago

History | View | Annotate | Download (37 kB)

1 296af7c9 Blue Swirl
/*
2 296af7c9 Blue Swirl
 * QEMU System Emulator
3 296af7c9 Blue Swirl
 *
4 296af7c9 Blue Swirl
 * Copyright (c) 2003-2008 Fabrice Bellard
5 296af7c9 Blue Swirl
 *
6 296af7c9 Blue Swirl
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 296af7c9 Blue Swirl
 * of this software and associated documentation files (the "Software"), to deal
8 296af7c9 Blue Swirl
 * in the Software without restriction, including without limitation the rights
9 296af7c9 Blue Swirl
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 296af7c9 Blue Swirl
 * copies of the Software, and to permit persons to whom the Software is
11 296af7c9 Blue Swirl
 * furnished to do so, subject to the following conditions:
12 296af7c9 Blue Swirl
 *
13 296af7c9 Blue Swirl
 * The above copyright notice and this permission notice shall be included in
14 296af7c9 Blue Swirl
 * all copies or substantial portions of the Software.
15 296af7c9 Blue Swirl
 *
16 296af7c9 Blue Swirl
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 296af7c9 Blue Swirl
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 296af7c9 Blue Swirl
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 296af7c9 Blue Swirl
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 296af7c9 Blue Swirl
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 296af7c9 Blue Swirl
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 296af7c9 Blue Swirl
 * THE SOFTWARE.
23 296af7c9 Blue Swirl
 */
24 296af7c9 Blue Swirl
25 296af7c9 Blue Swirl
/* Needed early for CONFIG_BSD etc. */
26 296af7c9 Blue Swirl
#include "config-host.h"
27 296af7c9 Blue Swirl
28 83c9089e Paolo Bonzini
#include "monitor/monitor.h"
29 9c17d615 Paolo Bonzini
#include "sysemu/sysemu.h"
30 022c62cb Paolo Bonzini
#include "exec/gdbstub.h"
31 9c17d615 Paolo Bonzini
#include "sysemu/dma.h"
32 9c17d615 Paolo Bonzini
#include "sysemu/kvm.h"
33 de0b36b6 Luiz Capitulino
#include "qmp-commands.h"
34 296af7c9 Blue Swirl
35 1de7afc9 Paolo Bonzini
#include "qemu/thread.h"
36 9c17d615 Paolo Bonzini
#include "sysemu/cpus.h"
37 9c17d615 Paolo Bonzini
#include "sysemu/qtest.h"
38 1de7afc9 Paolo Bonzini
#include "qemu/main-loop.h"
39 1de7afc9 Paolo Bonzini
#include "qemu/bitmap.h"
40 cb365646 Liu Ping Fan
#include "qemu/seqlock.h"
41 0ff0fc19 Jan Kiszka
42 0ff0fc19 Jan Kiszka
#ifndef _WIN32
43 1de7afc9 Paolo Bonzini
#include "qemu/compatfd.h"
44 0ff0fc19 Jan Kiszka
#endif
45 296af7c9 Blue Swirl
46 6d9cb73c Jan Kiszka
#ifdef CONFIG_LINUX
47 6d9cb73c Jan Kiszka
48 6d9cb73c Jan Kiszka
#include <sys/prctl.h>
49 6d9cb73c Jan Kiszka
50 c0532a76 Marcelo Tosatti
#ifndef PR_MCE_KILL
51 c0532a76 Marcelo Tosatti
#define PR_MCE_KILL 33
52 c0532a76 Marcelo Tosatti
#endif
53 c0532a76 Marcelo Tosatti
54 6d9cb73c Jan Kiszka
#ifndef PR_MCE_KILL_SET
55 6d9cb73c Jan Kiszka
#define PR_MCE_KILL_SET 1
56 6d9cb73c Jan Kiszka
#endif
57 6d9cb73c Jan Kiszka
58 6d9cb73c Jan Kiszka
#ifndef PR_MCE_KILL_EARLY
59 6d9cb73c Jan Kiszka
#define PR_MCE_KILL_EARLY 1
60 6d9cb73c Jan Kiszka
#endif
61 6d9cb73c Jan Kiszka
62 6d9cb73c Jan Kiszka
#endif /* CONFIG_LINUX */
63 6d9cb73c Jan Kiszka
64 182735ef Andreas Färber
static CPUState *next_cpu;
65 296af7c9 Blue Swirl
66 321bc0b2 Tiejun Chen
/* A CPU counts as stopped when it has been individually halted or when
 * the VM as a whole is not in the running state.
 */
bool cpu_is_stopped(CPUState *cpu)
{
    if (cpu->stopped) {
        return true;
    }
    return !runstate_is_running();
}
70 321bc0b2 Tiejun Chen
71 a98ae1d8 Andreas Färber
/* Decide whether this vCPU thread currently has nothing to do.
 * Pending queued work or a stop request always makes it busy; an
 * already-stopped CPU is trivially idle.
 */
static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    /* Idle only if halted with no work and no in-kernel halt handling. */
    return cpu->halted && !qemu_cpu_has_work(cpu) && !kvm_halt_in_kernel();
}
85 ac873f1e Peter Maydell
86 ac873f1e Peter Maydell
static bool all_cpu_threads_idle(void)
87 ac873f1e Peter Maydell
{
88 182735ef Andreas Färber
    CPUState *cpu;
89 ac873f1e Peter Maydell
90 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
91 182735ef Andreas Färber
        if (!cpu_thread_is_idle(cpu)) {
92 ac873f1e Peter Maydell
            return false;
93 ac873f1e Peter Maydell
        }
94 ac873f1e Peter Maydell
    }
95 ac873f1e Peter Maydell
    return true;
96 ac873f1e Peter Maydell
}
97 ac873f1e Peter Maydell
98 296af7c9 Blue Swirl
/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
/* Realtime clock value at which the current warp began, or -1 when no
 * warp is in progress (see icount_warp_rt() / qemu_clock_warp()).
 */
static int64_t vm_clock_warp_start;
/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10

/* Only written by TCG thread */
static int64_t qemu_icount;

/* Timers driving the icount speed-adjustment and clock-warp machinery;
 * all created in configure_icount().
 */
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    /* Serialized by vmstate_timers; presumably a placeholder kept for
     * migration-format compatibility -- TODO confirm.
     */
    int64_t dummy;
} TimersState;

static TimersState timers_state;
133 946fb27c Paolo Bonzini
134 946fb27c Paolo Bonzini
/* Return the virtual CPU time, based on the instruction counter.  */
135 17a15f1b Paolo Bonzini
/* Return the virtual CPU time, based on the instruction counter.
 * Caller must hold the vm_clock_seqlock write side, or call from inside
 * a seqlock read/retry loop (see cpu_get_icount()).
 */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        /* Reading the clock while I/O is not possible indicates the
         * count may be mid-update; warn, but carry on.
         */
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        /* Adjust by the per-CPU decrementer state -- presumably the
         * instructions not yet executed in the current translation
         * block; TODO confirm.
         */
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    /* Scale instructions to virtual clock ticks and apply the bias. */
    return qemu_icount_bias + (icount << icount_time_shift);
}
150 946fb27c Paolo Bonzini
151 17a15f1b Paolo Bonzini
int64_t cpu_get_icount(void)
152 17a15f1b Paolo Bonzini
{
153 17a15f1b Paolo Bonzini
    int64_t icount;
154 17a15f1b Paolo Bonzini
    unsigned start;
155 17a15f1b Paolo Bonzini
156 17a15f1b Paolo Bonzini
    do {
157 17a15f1b Paolo Bonzini
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
158 17a15f1b Paolo Bonzini
        icount = cpu_get_icount_locked();
159 17a15f1b Paolo Bonzini
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
160 17a15f1b Paolo Bonzini
161 17a15f1b Paolo Bonzini
    return icount;
162 17a15f1b Paolo Bonzini
}
163 17a15f1b Paolo Bonzini
164 946fb27c Paolo Bonzini
/* return the host CPU cycle counter and handle stop/restart */
165 cb365646 Liu Ping Fan
/* Caller must hold the BQL */
166 946fb27c Paolo Bonzini
int64_t cpu_get_ticks(void)
167 946fb27c Paolo Bonzini
{
168 5f3e3101 Paolo Bonzini
    int64_t ticks;
169 5f3e3101 Paolo Bonzini
170 946fb27c Paolo Bonzini
    if (use_icount) {
171 946fb27c Paolo Bonzini
        return cpu_get_icount();
172 946fb27c Paolo Bonzini
    }
173 5f3e3101 Paolo Bonzini
174 5f3e3101 Paolo Bonzini
    ticks = timers_state.cpu_ticks_offset;
175 5f3e3101 Paolo Bonzini
    if (timers_state.cpu_ticks_enabled) {
176 5f3e3101 Paolo Bonzini
        ticks += cpu_get_real_ticks();
177 5f3e3101 Paolo Bonzini
    }
178 5f3e3101 Paolo Bonzini
179 5f3e3101 Paolo Bonzini
    if (timers_state.cpu_ticks_prev > ticks) {
180 5f3e3101 Paolo Bonzini
        /* Note: non increasing ticks may happen if the host uses
181 5f3e3101 Paolo Bonzini
           software suspend */
182 5f3e3101 Paolo Bonzini
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
183 5f3e3101 Paolo Bonzini
        ticks = timers_state.cpu_ticks_prev;
184 946fb27c Paolo Bonzini
    }
185 5f3e3101 Paolo Bonzini
186 5f3e3101 Paolo Bonzini
    timers_state.cpu_ticks_prev = ticks;
187 5f3e3101 Paolo Bonzini
    return ticks;
188 946fb27c Paolo Bonzini
}
189 946fb27c Paolo Bonzini
190 cb365646 Liu Ping Fan
static int64_t cpu_get_clock_locked(void)
191 946fb27c Paolo Bonzini
{
192 5f3e3101 Paolo Bonzini
    int64_t ticks;
193 cb365646 Liu Ping Fan
194 5f3e3101 Paolo Bonzini
    ticks = timers_state.cpu_clock_offset;
195 5f3e3101 Paolo Bonzini
    if (timers_state.cpu_ticks_enabled) {
196 5f3e3101 Paolo Bonzini
        ticks += get_clock();
197 946fb27c Paolo Bonzini
    }
198 cb365646 Liu Ping Fan
199 5f3e3101 Paolo Bonzini
    return ticks;
200 cb365646 Liu Ping Fan
}
201 cb365646 Liu Ping Fan
202 cb365646 Liu Ping Fan
/* return the host CPU monotonic timer and handle stop/restart */
203 cb365646 Liu Ping Fan
int64_t cpu_get_clock(void)
204 cb365646 Liu Ping Fan
{
205 cb365646 Liu Ping Fan
    int64_t ti;
206 cb365646 Liu Ping Fan
    unsigned start;
207 cb365646 Liu Ping Fan
208 cb365646 Liu Ping Fan
    do {
209 cb365646 Liu Ping Fan
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
210 cb365646 Liu Ping Fan
        ti = cpu_get_clock_locked();
211 cb365646 Liu Ping Fan
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
212 cb365646 Liu Ping Fan
213 cb365646 Liu Ping Fan
    return ti;
214 946fb27c Paolo Bonzini
}
215 946fb27c Paolo Bonzini
216 cb365646 Liu Ping Fan
/* enable cpu_get_ticks()
217 cb365646 Liu Ping Fan
 * Caller must hold BQL which server as mutex for vm_clock_seqlock.
218 cb365646 Liu Ping Fan
 */
219 946fb27c Paolo Bonzini
void cpu_enable_ticks(void)
220 946fb27c Paolo Bonzini
{
221 cb365646 Liu Ping Fan
    /* Here, the really thing protected by seqlock is cpu_clock_offset. */
222 cb365646 Liu Ping Fan
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
223 946fb27c Paolo Bonzini
    if (!timers_state.cpu_ticks_enabled) {
224 946fb27c Paolo Bonzini
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
225 946fb27c Paolo Bonzini
        timers_state.cpu_clock_offset -= get_clock();
226 946fb27c Paolo Bonzini
        timers_state.cpu_ticks_enabled = 1;
227 946fb27c Paolo Bonzini
    }
228 cb365646 Liu Ping Fan
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
229 946fb27c Paolo Bonzini
}
230 946fb27c Paolo Bonzini
231 946fb27c Paolo Bonzini
/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* What the seqlock really protects here is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        /* NOTE: cpu_get_clock_locked() must run before cpu_ticks_enabled
         * is cleared, so the frozen offset includes the running clock.
         */
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
246 946fb27c Paolo Bonzini
247 946fb27c Paolo Bonzini
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

/* Periodically re-tune icount_time_shift so virtual time tracks real
 * time: compare the two clocks and nudge the shift down (slow the
 * guest) or up (speed it up) by one step per call.
 */
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    /* delta > 0: virtual time ahead of real time; < 0: behind. */
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    /* Recompute the bias so the scaled count stays continuous across
     * the shift change.
     */
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
289 946fb27c Paolo Bonzini
290 946fb27c Paolo Bonzini
/* QEMU_CLOCK_REALTIME callback: re-arm this timer one second ahead and
 * run an adjustment pass.  Catches emulated time passing too slowly,
 * even while the guest is idle (see configure_icount()).
 */
static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}
296 946fb27c Paolo Bonzini
297 946fb27c Paolo Bonzini
/* QEMU_CLOCK_VIRTUAL callback: re-arm this timer a tenth of a second of
 * virtual time ahead and run an adjustment pass.  Catches emulated time
 * passing too fast (see configure_icount()).
 */
static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
    icount_adjust();
}
304 946fb27c Paolo Bonzini
305 946fb27c Paolo Bonzini
static int64_t qemu_icount_round(int64_t count)
306 946fb27c Paolo Bonzini
{
307 946fb27c Paolo Bonzini
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
308 946fb27c Paolo Bonzini
}
309 946fb27c Paolo Bonzini
310 946fb27c Paolo Bonzini
/* Warp-timer callback: fold the real time that elapsed since the warp
 * began into qemu_icount_bias, so QEMU_CLOCK_VIRTUAL advances while the
 * vCPUs sleep.  Also invoked directly from qemu_clock_warp().
 */
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        /* Real time elapsed since the warp started. */
        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock_locked();
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        qemu_icount_bias += warp_delta;
    }
    /* Mark the warp as finished. */
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    /* The warp may have made virtual-time timers due; kick the clock. */
    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
344 946fb27c Paolo Bonzini
345 8156be56 Paolo Bonzini
/* Advance QEMU_CLOCK_VIRTUAL to 'dest', running every virtual timer
 * that becomes due along the way.  Only valid under qtest, where test
 * commands drive the clock explicitly.
 */
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        /* Jump to the next timer deadline, but never past the target. */
        int64_t warp = MIN(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
361 8156be56 Paolo Bonzini
362 40daca54 Alex Bligh
/* Start (or service) a QEMU_CLOCK_VIRTUAL warp in icount mode, so that
 * virtual time keeps advancing toward the next timer deadline while all
 * vCPUs are idle instead of stalling forever.
 */
void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        /* No virtual timer pending at all: nothing to warp toward. */
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time, (related to the time left until the next
         * event) has passed. The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        /* A timer is already due: just notify the virtual clock. */
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
429 946fb27c Paolo Bonzini
430 946fb27c Paolo Bonzini
/* Migration description for TimersState.  cpu_clock_offset is only sent
 * from version 2 onward; 'dummy' is serialized unconditionally --
 * presumably retained for version 1 wire-format compatibility, TODO
 * confirm.
 */
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};
442 946fb27c Paolo Bonzini
443 946fb27c Paolo Bonzini
/* Parse the icount option string and set up icount mode.  Always
 * initializes the clock seqlock and registers the timer migration
 * state, even when icount is unused.
 *
 *   option == NULL  : icount disabled; only the common setup runs.
 *   option == "auto": adaptive mode (use_icount == 2) with periodic
 *                     speed-adjustment timers.
 *   otherwise       : fixed shift parsed from the string (use_icount == 1).
 */
void configure_icount(const char *option)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                          icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        /* NOTE(review): strtol errors are not checked here; a
         * non-numeric option silently yields shift 0 -- confirm whether
         * stricter validation is wanted.
         */
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                        icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                        icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
}
480 946fb27c Paolo Bonzini
481 946fb27c Paolo Bonzini
/***********************************************************/
482 296af7c9 Blue Swirl
/* Report an unrecoverable hardware-emulation error: print the formatted
 * message and the register state of every CPU to stderr, then abort().
 * Never returns.
 */
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
498 296af7c9 Blue Swirl
499 296af7c9 Blue Swirl
void cpu_synchronize_all_states(void)
500 296af7c9 Blue Swirl
{
501 182735ef Andreas Färber
    CPUState *cpu;
502 296af7c9 Blue Swirl
503 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
504 182735ef Andreas Färber
        cpu_synchronize_state(cpu);
505 296af7c9 Blue Swirl
    }
506 296af7c9 Blue Swirl
}
507 296af7c9 Blue Swirl
508 296af7c9 Blue Swirl
void cpu_synchronize_all_post_reset(void)
509 296af7c9 Blue Swirl
{
510 182735ef Andreas Färber
    CPUState *cpu;
511 296af7c9 Blue Swirl
512 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
513 182735ef Andreas Färber
        cpu_synchronize_post_reset(cpu);
514 296af7c9 Blue Swirl
    }
515 296af7c9 Blue Swirl
}
516 296af7c9 Blue Swirl
517 296af7c9 Blue Swirl
void cpu_synchronize_all_post_init(void)
518 296af7c9 Blue Swirl
{
519 182735ef Andreas Färber
    CPUState *cpu;
520 296af7c9 Blue Swirl
521 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
522 182735ef Andreas Färber
        cpu_synchronize_post_init(cpu);
523 296af7c9 Blue Swirl
    }
524 296af7c9 Blue Swirl
}
525 296af7c9 Blue Swirl
526 56983463 Kevin Wolf
/* Stop the VM: pause every vCPU, move to the requested run state and
 * notify listeners, then drain and flush all block devices.  Returns
 * the result of the final flush.
 */
static int do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    /* Block layer is quiesced even if we were already stopped. */
    bdrv_drain_all();
    return bdrv_flush_all();
}
543 296af7c9 Blue Swirl
544 a1fcaa73 Andreas Färber
/* A vCPU may execute only when no stop has been requested and it is not
 * already stopped.
 */
static bool cpu_can_run(CPUState *cpu)
{
    return !cpu->stop && !cpu_is_stopped(cpu);
}
554 296af7c9 Blue Swirl
555 91325046 Andreas Färber
/* React to a guest debug event: route the stop to the gdb stub, request
 * a debug stop from the main loop, and mark this vCPU stopped.
 */
static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}
561 3c638d06 Jan Kiszka
562 714bd040 Paolo Bonzini
/* Signal handler that kicks the currently executing CPU out of its
 * execution loop and raises the global exit request.  Presumably
 * installed for SIG_IPI on the vCPU thread -- installation site is not
 * visible here; confirm against the thread setup code.
 */
static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}
569 714bd040 Paolo Bonzini
570 6d9cb73c Jan Kiszka
#ifdef CONFIG_LINUX
571 6d9cb73c Jan Kiszka
/* Restore the default SIGBUS disposition, re-raise the signal and
 * unblock it so the process terminates with the original bus error.
 * Control reaching the bottom means re-raising failed; report and
 * abort in that case.
 */
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        /* SIGBUS is blocked here (signalfd-style delivery); unblocking
         * lets the pending default-action signal fire.
         */
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
587 6d9cb73c Jan Kiszka
588 6d9cb73c Jan Kiszka
/* SIGBUS dispatcher for the I/O thread: give KVM a chance to handle the
 * faulting address (e.g. hardware memory errors); if it cannot,
 * re-raise SIGBUS with the default disposition to terminate. */
static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}
596 6d9cb73c Jan Kiszka
597 6d9cb73c Jan Kiszka
/* Install the SIGBUS handler and opt in to early machine-check
 * delivery (PR_MCE_KILL_EARLY) so guest memory errors are reported to
 * QEMU as soon as the kernel detects them. */
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    /* NOTE(review): sigbus_handler takes struct qemu_signalfd_siginfo *,
     * cast here to the sa_sigaction signature; this assumes the layouts
     * are compatible — confirm before changing either type. */
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
608 6d9cb73c Jan Kiszka
609 290adf38 Andreas Färber
/* Drain pending SIG_IPI and SIGBUS signals on a KVM vCPU thread without
 * blocking (zero timeout).  SIGBUS is forwarded to KVM per-vCPU
 * handling; SIG_IPI is simply consumed.  Loops until neither signal is
 * left pending, since consuming one may leave the other queued. */
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        /* Zero timeout: poll for a pending signal, never sleep. */
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
645 1ab3c6c0 Jan Kiszka
646 6d9cb73c Jan Kiszka
#else /* !CONFIG_LINUX */
647 6d9cb73c Jan Kiszka
648 6d9cb73c Jan Kiszka
/* No SIGBUS/machine-check handling outside Linux. */
static void qemu_init_sigbus(void)
{
}
651 1ab3c6c0 Jan Kiszka
652 290adf38 Andreas Färber
/* Nothing to drain outside Linux. */
static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
655 6d9cb73c Jan Kiszka
#endif /* !CONFIG_LINUX */
656 6d9cb73c Jan Kiszka
657 296af7c9 Blue Swirl
#ifndef _WIN32
658 55f8d6ac Jan Kiszka
/* No-op handler installed for SIG_IPI: its only job is to make the
 * signal interrupt blocking syscalls (e.g. KVM_RUN) via EINTR. */
static void dummy_signal(int sig)
{
}
661 55f8d6ac Jan Kiszka
662 13618e05 Andreas Färber
/* Per-vCPU signal setup for KVM: install the no-op SIG_IPI handler and
 * hand the kernel a per-vCPU signal mask equal to the thread's current
 * mask minus SIG_IPI and SIGBUS, so those two can interrupt KVM_RUN. */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    /* Query (not modify) the current thread mask, then punch holes
     * for the signals that must reach this vCPU. */
    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
681 714bd040 Paolo Bonzini
682 714bd040 Paolo Bonzini
/* Signal setup for the shared TCG thread: SIG_IPI runs cpu_signal() to
 * kick the executing vCPU out of translated code, and is unblocked on
 * this thread so kicks are delivered here. */
static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
695 714bd040 Paolo Bonzini
696 55f8d6ac Jan Kiszka
#else /* _WIN32 */
697 13618e05 Andreas Färber
/* KVM is not available on Windows; reaching this is a bug. */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}
701 ff48eb5f Jan Kiszka
702 714bd040 Paolo Bonzini
/* No signal setup on Windows: TCG kicks use Suspend/ResumeThread in
 * qemu_cpu_kick_thread() instead of SIG_IPI. */
static void qemu_tcg_init_cpu_signals(void)
{
}
705 714bd040 Paolo Bonzini
#endif /* _WIN32 */
706 ff48eb5f Jan Kiszka
707 b2532d88 Stefan Weil
/* The big QEMU lock: serializes the I/O thread against all vCPU threads. */
static QemuMutex qemu_global_mutex;
/* Broadcast once the I/O thread has finished an acquisition attempt. */
static QemuCond qemu_io_proceeded_cond;
/* Set while the I/O thread is trying to take qemu_global_mutex. */
static bool iothread_requesting_mutex;

static QemuThread io_thread;

/* TCG runs all vCPUs on a single shared thread. */
static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
/* Signalled when queued run_on_cpu() work items have completed. */
static QemuCond qemu_work_cond;
722 d3b12f5d Paolo Bonzini
void qemu_init_cpu_loop(void)
723 296af7c9 Blue Swirl
{
724 6d9cb73c Jan Kiszka
    qemu_init_sigbus();
725 ed94592b Anthony Liguori
    qemu_cond_init(&qemu_cpu_cond);
726 ed94592b Anthony Liguori
    qemu_cond_init(&qemu_pause_cond);
727 ed94592b Anthony Liguori
    qemu_cond_init(&qemu_work_cond);
728 46daff13 Paolo Bonzini
    qemu_cond_init(&qemu_io_proceeded_cond);
729 296af7c9 Blue Swirl
    qemu_mutex_init(&qemu_global_mutex);
730 296af7c9 Blue Swirl
731 b7680cb6 Jan Kiszka
    qemu_thread_get_self(&io_thread);
732 296af7c9 Blue Swirl
}
733 296af7c9 Blue Swirl
734 f100f0b3 Andreas Färber
/* Run func(data) synchronously on @cpu's thread.  When called from that
 * thread the function is invoked directly; otherwise a stack-allocated
 * work item is queued and we sleep on qemu_work_cond (releasing the
 * global mutex) until the target vCPU has executed it.
 * Caller must hold qemu_global_mutex. */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    /* Fully initialize the item *before* linking it into the target
     * CPU's queue.  Previously wi.next/wi.done were assigned only after
     * publication, which was safe solely because of the global mutex
     * and would break under any finer-grained locking. */
    wi.func = func;
    wi.data = data;
    wi.free = false;
    wi.next = NULL;
    wi.done = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        /* Waiting may run other work that clobbers current_cpu;
         * restore it after each wakeup. */
        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}
763 e82bcec2 Marcelo Tosatti
764 3c02270d Chegu Vinod
/* Queue func(data) to run asynchronously on @cpu's thread.  The work
 * item is heap-allocated and freed by the vCPU thread after execution
 * (item->free == true).  When called on the target thread itself, the
 * function runs synchronously instead.
 * Caller must hold qemu_global_mutex. */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *item;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    /* g_malloc0 zeroes the struct, so next == NULL and done == false
     * from the start. */
    item = g_malloc0(sizeof(struct qemu_work_item));
    item->func = func;
    item->data = data;
    item->free = true;

    /* Append to the CPU's singly linked work queue. */
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = item;
    } else {
        cpu->queued_work_last->next = item;
    }
    cpu->queued_work_last = item;

    qemu_cpu_kick(cpu);
}
788 3c02270d Chegu Vinod
789 6d45b109 Andreas Färber
/* Execute and unlink every queued work item for @cpu, then wake all
 * run_on_cpu() waiters.  Runs on the vCPU thread with the global mutex
 * held. */
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        /* Items queued by async_run_on_cpu() own their own memory. */
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}
808 e82bcec2 Marcelo Tosatti
809 509a0d78 Andreas Färber
/* Bookkeeping done on every wakeup of a vCPU thread: acknowledge a
 * pending stop request (signalling pause_all_vcpus()), run queued work,
 * and re-arm the kick latch so the next qemu_cpu_kick() takes effect. */
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}
819 296af7c9 Blue Swirl
820 6cabe1f3 Jan Kiszka
/* Idle path of the shared TCG thread: sleep while all vCPUs are halted,
 * yield to the I/O thread while it wants the big lock, then run common
 * wakeup bookkeeping for every vCPU. */
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    /* Let qemu_mutex_lock_iothread() win the lock without contending
     * with a TCG thread that would immediately re-enter execution. */
    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
839 296af7c9 Blue Swirl
840 fd529e8f Andreas Färber
/* Idle path of a KVM vCPU thread: sleep on the per-CPU halt condition
 * while there is nothing to run, then drain pending SIG_IPI/SIGBUS and
 * do common wakeup bookkeeping. */
static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
849 296af7c9 Blue Swirl
850 7e97cd88 Jan Kiszka
/* Thread body for one KVM vCPU: initialize the in-kernel vCPU and its
 * signal setup under the global mutex, announce creation, then loop
 * forever alternating between guest execution and the idle/wait path.
 * Never returns. */
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
884 296af7c9 Blue Swirl
885 c7f0f3b1 Anthony Liguori
/* Thread body for a "dummy" vCPU (qtest, no accelerator): never runs
 * guest code, just blocks in sigwait() for SIG_IPI kicks and services
 * queued work on each wakeup.  POSIX-only; never returns. */
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        /* Drop current_cpu and the lock while blocked: this thread is
         * not executing the vCPU during the wait. */
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
926 c7f0f3b1 Anthony Liguori
927 bdb7ca67 Jan Kiszka
static void tcg_exec_all(void);
928 bdb7ca67 Jan Kiszka
929 7e97cd88 Jan Kiszka
/* Thread body of the single shared TCG thread: registers every vCPU,
 * waits for the machine to start, then loops running all vCPUs in
 * round-robin via tcg_exec_all().  Never returns. */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    /* NOTE: the CPU_FOREACH below deliberately reuses (and clobbers)
     * the 'cpu' argument variable; all vCPUs share this one thread. */
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            /* An expired icount deadline means virtual-clock timers are
             * due: notify the clock so they get serviced. */
            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
968 296af7c9 Blue Swirl
969 2ff09a40 Andreas Färber
/* Interrupt @cpu's thread so it leaves guest execution.  POSIX: deliver
 * SIG_IPI.  Windows (no signals): suspend the TCG thread, run the kick
 * handler on its behalf, and resume it.
 *
 * Fix: GetThreadContext() returns nonzero on SUCCESS (MSDN), so the
 * retry loop must spin while it returns 0 (failure) — the previous
 * "!= 0" condition was inverted relative to the stated intent of
 * waiting until the context can be retrieved. */
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) == 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}
1007 cc015e9a Paolo Bonzini
1008 c08d7424 Andreas Färber
/* Wake @cpu: broadcast its halt condition, and for non-TCG accelerators
 * additionally interrupt the vCPU thread itself (latched by
 * thread_kicked until the thread services the wakeup). */
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);

    if (cpu->thread_kicked || tcg_enabled()) {
        return;
    }
    qemu_cpu_kick_thread(cpu);
    cpu->thread_kicked = true;
}
1016 296af7c9 Blue Swirl
1017 46d62fac Jan Kiszka
void qemu_cpu_kick_self(void)
1018 296af7c9 Blue Swirl
{
1019 b55c22c6 Paolo Bonzini
#ifndef _WIN32
1020 4917cf44 Andreas Färber
    assert(current_cpu);
1021 296af7c9 Blue Swirl
1022 4917cf44 Andreas Färber
    if (!current_cpu->thread_kicked) {
1023 4917cf44 Andreas Färber
        qemu_cpu_kick_thread(current_cpu);
1024 4917cf44 Andreas Färber
        current_cpu->thread_kicked = true;
1025 296af7c9 Blue Swirl
    }
1026 b55c22c6 Paolo Bonzini
#else
1027 b55c22c6 Paolo Bonzini
    abort();
1028 b55c22c6 Paolo Bonzini
#endif
1029 296af7c9 Blue Swirl
}
1030 296af7c9 Blue Swirl
1031 60e82579 Andreas Färber
/* True if the calling thread is @cpu's execution thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}
1035 296af7c9 Blue Swirl
1036 aa723c23 Juan Quintela
/* True when running on a vCPU thread (as opposed to the I/O thread). */
static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
1040 aa723c23 Juan Quintela
1041 296af7c9 Blue Swirl
/* Acquire the big QEMU lock from the I/O thread.  With TCG the vCPU
 * thread can hold the lock for long stretches of translated code, so
 * advertise the request and kick the TCG thread out of execution to
 * make it release the lock in qemu_tcg_wait_io_event(). */
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            /* Contended: force the TCG thread (shared by all vCPUs,
             * kicked via first_cpu) to stop and drop the lock. */
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}
1055 296af7c9 Blue Swirl
1056 296af7c9 Blue Swirl
/* Release the big QEMU lock. */
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
1060 296af7c9 Blue Swirl
1061 296af7c9 Blue Swirl
static int all_vcpus_paused(void)
1062 296af7c9 Blue Swirl
{
1063 bdc44640 Andreas Färber
    CPUState *cpu;
1064 296af7c9 Blue Swirl
1065 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
1066 182735ef Andreas Färber
        if (!cpu->stopped) {
1067 296af7c9 Blue Swirl
            return 0;
1068 0ab07c62 Jan Kiszka
        }
1069 296af7c9 Blue Swirl
    }
1070 296af7c9 Blue Swirl
1071 296af7c9 Blue Swirl
    return 1;
1072 296af7c9 Blue Swirl
}
1073 296af7c9 Blue Swirl
1074 296af7c9 Blue Swirl
/* Stop all vCPUs and wait until each has acknowledged.  Also freezes
 * the virtual clock.  Caller holds the big QEMU lock. */
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            /* TCG shares one thread for all vCPUs (and qtest never runs
             * them), so no other thread can acknowledge the stop; mark
             * every vCPU stopped directly. */
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        /* Re-kick in case a vCPU re-entered execution before noticing
         * its stop flag. */
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}
1102 296af7c9 Blue Swirl
1103 2993683b Igor Mammedov
/* Clear @cpu's stop state and kick its thread back into execution. */
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
1109 2993683b Igor Mammedov
1110 296af7c9 Blue Swirl
void resume_all_vcpus(void)
1111 296af7c9 Blue Swirl
{
1112 bdc44640 Andreas Färber
    CPUState *cpu;
1113 296af7c9 Blue Swirl
1114 40daca54 Alex Bligh
    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
1115 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
1116 182735ef Andreas Färber
        cpu_resume(cpu);
1117 296af7c9 Blue Swirl
    }
1118 296af7c9 Blue Swirl
}
1119 296af7c9 Blue Swirl
1120 e5ab30a2 Andreas Färber
/* Attach @cpu to the (single, shared) TCG execution thread, creating
 * the thread and its halt condition on first use and waiting until the
 * thread reports creation.  Subsequent vCPUs just reuse them. */
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        /* Needed for the Suspend/ResumeThread kick mechanism. */
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
1144 296af7c9 Blue Swirl
1145 48a106bd Andreas Färber
/* Spawn a dedicated thread for one KVM vCPU and block until the thread
 * has announced its creation via qemu_cpu_cond. */
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
1156 296af7c9 Blue Swirl
1157 10a9021d Andreas Färber
/* Spawn a dummy vCPU thread (qtest / no accelerator) and block until it
 * has announced its creation via qemu_cpu_cond. */
static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
1168 c7f0f3b1 Anthony Liguori
1169 c643bed9 Andreas Färber
/* Bring up the execution thread for @cpu according to the configured
 * accelerator.  The vCPU starts in the stopped state and is released
 * later by resume_all_vcpus(). */
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}
1182 296af7c9 Blue Swirl
1183 b4a3d965 Jan Kiszka
/* Park the vCPU whose thread we are running on (no-op from the I/O
 * thread) and acknowledge any pending pause request. */
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
1192 296af7c9 Blue Swirl
1193 56983463 Kevin Wolf
/* Stop the VM, transitioning to @state.  From a vCPU thread the full
 * do_vm_stop() cannot run (it pauses all vCPUs), so queue a vmstop
 * request for the main loop and just park the current vCPU. */
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}
1207 296af7c9 Blue Swirl
1208 8a9236f1 Luiz Capitulino
/* does a state transition even if the VM is already stopped,
1209 8a9236f1 Luiz Capitulino
   current state is forgotten forever */
1210 56983463 Kevin Wolf
int vm_stop_force_state(RunState state)
1211 8a9236f1 Luiz Capitulino
{
1212 8a9236f1 Luiz Capitulino
    if (runstate_is_running()) {
1213 56983463 Kevin Wolf
        return vm_stop(state);
1214 8a9236f1 Luiz Capitulino
    } else {
1215 8a9236f1 Luiz Capitulino
        runstate_set(state);
1216 594a45ce Kevin Wolf
        /* Make sure to return an error if the flush in a previous vm_stop()
1217 594a45ce Kevin Wolf
         * failed. */
1218 594a45ce Kevin Wolf
        return bdrv_flush_all();
1219 8a9236f1 Luiz Capitulino
    }
1220 8a9236f1 Luiz Capitulino
}
1221 8a9236f1 Luiz Capitulino
1222 9349b4f9 Andreas Färber
/* Run the TCG translator/executor for one CPU.
 *
 * When icount mode is active, first compute an instruction budget bounded
 * by the next QEMU_CLOCK_VIRTUAL deadline so that timer expiry stays in
 * sync with the emulated instruction count, then fold any unexecuted
 * budget back after cpu_exec() returns.
 *
 * Returns the cpu_exec() exit reason (e.g. EXCP_DEBUG).
 */
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        /* Drain the leftover budget from the previous slice into the
         * global counter before handing out a fresh one. */
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        qemu_icount += count;
        /* The low 16-bit decrementer caps a slice at 0xffff instructions;
         * the remainder is carried in icount_extra. */
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
1271 296af7c9 Blue Swirl
1272 bdb7ca67 Jan Kiszka
/* Round-robin scheduler for the single-threaded TCG loop.
 *
 * Resumes at the global next_cpu (so a CPU that broke out last time is
 * not starved), runs each runnable CPU for one slice, and stops early on
 * a debug exception, on a stop request for the current CPU, or when
 * exit_request is raised.
 */
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        /* Freeze the virtual clock while single-stepping with NOTIMER,
         * so timers do not fire between steps. */
        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            /* Let the caller service the stop request promptly. */
            break;
        }
    }
    exit_request = 0;
}
1301 296af7c9 Blue Swirl
1302 296af7c9 Blue Swirl
void set_numa_modes(void)
1303 296af7c9 Blue Swirl
{
1304 1b1ed8dc Andreas Färber
    CPUState *cpu;
1305 296af7c9 Blue Swirl
    int i;
1306 296af7c9 Blue Swirl
1307 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
1308 296af7c9 Blue Swirl
        for (i = 0; i < nb_numa_nodes; i++) {
1309 55e5c285 Andreas Färber
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
1310 1b1ed8dc Andreas Färber
                cpu->numa_node = i;
1311 296af7c9 Blue Swirl
            }
1312 296af7c9 Blue Swirl
        }
1313 296af7c9 Blue Swirl
    }
1314 296af7c9 Blue Swirl
}
1315 296af7c9 Blue Swirl
1316 9a78eead Stefan Weil
/* Print the list of supported CPU models to @f using @cpu_fprintf.
 * Delegates to the per-target cpu_list() macro; targets that do not
 * define it simply print nothing.
 */
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
1323 de0b36b6 Luiz Capitulino
1324 de0b36b6 Luiz Capitulino
/* QMP 'query-cpus' handler.
 *
 * Builds a singly-linked CpuInfoList with one entry per CPU: index,
 * whether it is the current (first) CPU, halted state, host thread id,
 * and a target-specific program counter field.  Synchronizes each CPU's
 * register state from the accelerator before reading it.
 *
 * Returns the list head; caller owns and must free it.  @errp is unused
 * here (no failure paths).
 */
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
        /* Pick up the target-specific env to read the PC-like register. */
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        /* Make sure register contents are up to date before sampling. */
        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        /* Linear address: EIP plus the CS segment base. */
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
1380 0cfd6a9a Luiz Capitulino
1381 0cfd6a9a Luiz Capitulino
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1382 0cfd6a9a Luiz Capitulino
                 bool has_cpu, int64_t cpu_index, Error **errp)
1383 0cfd6a9a Luiz Capitulino
{
1384 0cfd6a9a Luiz Capitulino
    FILE *f;
1385 0cfd6a9a Luiz Capitulino
    uint32_t l;
1386 55e5c285 Andreas Färber
    CPUState *cpu;
1387 0cfd6a9a Luiz Capitulino
    uint8_t buf[1024];
1388 0cfd6a9a Luiz Capitulino
1389 0cfd6a9a Luiz Capitulino
    if (!has_cpu) {
1390 0cfd6a9a Luiz Capitulino
        cpu_index = 0;
1391 0cfd6a9a Luiz Capitulino
    }
1392 0cfd6a9a Luiz Capitulino
1393 151d1322 Andreas Färber
    cpu = qemu_get_cpu(cpu_index);
1394 151d1322 Andreas Färber
    if (cpu == NULL) {
1395 0cfd6a9a Luiz Capitulino
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1396 0cfd6a9a Luiz Capitulino
                  "a CPU number");
1397 0cfd6a9a Luiz Capitulino
        return;
1398 0cfd6a9a Luiz Capitulino
    }
1399 0cfd6a9a Luiz Capitulino
1400 0cfd6a9a Luiz Capitulino
    f = fopen(filename, "wb");
1401 0cfd6a9a Luiz Capitulino
    if (!f) {
1402 618da851 Luiz Capitulino
        error_setg_file_open(errp, errno, filename);
1403 0cfd6a9a Luiz Capitulino
        return;
1404 0cfd6a9a Luiz Capitulino
    }
1405 0cfd6a9a Luiz Capitulino
1406 0cfd6a9a Luiz Capitulino
    while (size != 0) {
1407 0cfd6a9a Luiz Capitulino
        l = sizeof(buf);
1408 0cfd6a9a Luiz Capitulino
        if (l > size)
1409 0cfd6a9a Luiz Capitulino
            l = size;
1410 2f4d0f59 Aneesh Kumar K.V
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
1411 2f4d0f59 Aneesh Kumar K.V
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "specified", addr);
1412 2f4d0f59 Aneesh Kumar K.V
            goto exit;
1413 2f4d0f59 Aneesh Kumar K.V
        }
1414 0cfd6a9a Luiz Capitulino
        if (fwrite(buf, 1, l, f) != l) {
1415 0cfd6a9a Luiz Capitulino
            error_set(errp, QERR_IO_ERROR);
1416 0cfd6a9a Luiz Capitulino
            goto exit;
1417 0cfd6a9a Luiz Capitulino
        }
1418 0cfd6a9a Luiz Capitulino
        addr += l;
1419 0cfd6a9a Luiz Capitulino
        size -= l;
1420 0cfd6a9a Luiz Capitulino
    }
1421 0cfd6a9a Luiz Capitulino
1422 0cfd6a9a Luiz Capitulino
exit:
1423 0cfd6a9a Luiz Capitulino
    fclose(f);
1424 0cfd6a9a Luiz Capitulino
}
1425 6d3962bf Luiz Capitulino
1426 6d3962bf Luiz Capitulino
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1427 6d3962bf Luiz Capitulino
                  Error **errp)
1428 6d3962bf Luiz Capitulino
{
1429 6d3962bf Luiz Capitulino
    FILE *f;
1430 6d3962bf Luiz Capitulino
    uint32_t l;
1431 6d3962bf Luiz Capitulino
    uint8_t buf[1024];
1432 6d3962bf Luiz Capitulino
1433 6d3962bf Luiz Capitulino
    f = fopen(filename, "wb");
1434 6d3962bf Luiz Capitulino
    if (!f) {
1435 618da851 Luiz Capitulino
        error_setg_file_open(errp, errno, filename);
1436 6d3962bf Luiz Capitulino
        return;
1437 6d3962bf Luiz Capitulino
    }
1438 6d3962bf Luiz Capitulino
1439 6d3962bf Luiz Capitulino
    while (size != 0) {
1440 6d3962bf Luiz Capitulino
        l = sizeof(buf);
1441 6d3962bf Luiz Capitulino
        if (l > size)
1442 6d3962bf Luiz Capitulino
            l = size;
1443 6d3962bf Luiz Capitulino
        cpu_physical_memory_rw(addr, buf, l, 0);
1444 6d3962bf Luiz Capitulino
        if (fwrite(buf, 1, l, f) != l) {
1445 6d3962bf Luiz Capitulino
            error_set(errp, QERR_IO_ERROR);
1446 6d3962bf Luiz Capitulino
            goto exit;
1447 6d3962bf Luiz Capitulino
        }
1448 6d3962bf Luiz Capitulino
        addr += l;
1449 6d3962bf Luiz Capitulino
        size -= l;
1450 6d3962bf Luiz Capitulino
    }
1451 6d3962bf Luiz Capitulino
1452 6d3962bf Luiz Capitulino
exit:
1453 6d3962bf Luiz Capitulino
    fclose(f);
1454 6d3962bf Luiz Capitulino
}
1455 ab49ab5c Luiz Capitulino
1456 ab49ab5c Luiz Capitulino
/* QMP 'inject-nmi' handler.
 *
 * x86: delivers an NMI to every CPU — via the APIC when one is present,
 * otherwise by raising CPU_INTERRUPT_NMI directly.
 * S390: restarts the CPU selected by the current monitor CPU index
 * (the closest S390 analogue of an NMI), reporting failure via @errp.
 * All other targets: reports QERR_UNSUPPORTED.
 */
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            /* No APIC modelled: inject the NMI on the CPU core itself. */
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    CPU_FOREACH(cs) {
        cpu = S390_CPU(cs);
        /* Only act on the CPU currently selected in the monitor. */
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}