/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "qmp-commands.h"

#include "qemu-thread.h"
#include "cpus.h"
#include "main-loop.h"

#ifndef _WIN32
#include "compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that.  */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

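/* Re-tune icount_time_shift so that the virtual clock tracks real time:
   compare the host monotonic clock with the vm_clock and shift guest time
   slower or faster (never beyond MAX_ICOUNT_SHIFT) depending on which one
   is ahead, then recompute qemu_icount_bias.  With icount_time_shift == 3,
   for example, each guest instruction accounts for 8 ns of vm_clock,
   i.e. roughly 125 MIPS (the initial guess used by configure_icount()
   below).  Called periodically from the two adjustment timers.  */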
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

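/* Timer callbacks that periodically re-run icount_adjust(): the rt_clock
   variant fires once per second of real time, the vm_clock variant once
   per 100ms of virtual time (both armed in configure_icount() below).  */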
static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

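/* Convert a vm_clock interval (in ns) into a number of instructions,
   rounding up so that a non-zero deadline always yields at least one
   instruction of budget.  */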
static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

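/* rt_clock callback that ends a pending "warp": fold the real time that
   elapsed since qemu_clock_warp() armed the timer into qemu_icount_bias,
   so the vm_clock jumps forward over the period the CPUs were idle.  In
   adaptive mode (use_icount == 2) the jump is capped so that the vm_clock
   never runs ahead of real time.  */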
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(vm_clock)) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}

void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

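/* Parse the -icount command line option: either a fixed power-of-two
   shift (e.g. a shift of 2 makes every instruction account for 4 ns of
   vm_clock) or "auto", which starts from a guess and lets the two
   adjustment timers re-tune the shift at run time.  Note that the timers
   vmstate is registered here even when icount is disabled.  */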
void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUState *env)
{
    return !runstate_is_running() || env->stopped;
}

static void do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        bdrv_drain_all();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

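/* A vCPU may only execute guest code when no stop has been requested,
   it has not already been stopped, and the VM as a whole is running.  */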
static int cpu_can_run(CPUState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !runstate_is_running()) {
        return 0;
    }
    return 1;
}

static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !runstate_is_running()) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env) ||
        (kvm_enabled() && kvm_irqchip_in_kernel())) {
        return false;
    }
    return true;
}

bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *env)
{
    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
    env->stopped = 1;
}

static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }

    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

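/* Run func(data) on the given vCPU's thread and wait for it to complete.
   If called from that vCPU itself the function simply runs inline;
   otherwise the work item is queued on the vCPU, the vCPU is kicked, and
   the caller sleeps on qemu_work_cond (dropping the global mutex while
   waiting) until the vCPU thread has drained the queue in
   flush_queued_work().  */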
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

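/* Idle handling for the single TCG thread: while every vCPU is idle,
   start warping the vm_clock and sleep on tcg_halt_cond; then yield to
   the iothread if it is waiting for the global mutex, and finally run
   the common stop/queued-work processing for each vCPU.  */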
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

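/* Per-vCPU thread body for KVM: create the kernel vcpu, install the
   per-thread signal masks, signal creation back to qemu_kvm_start_vcpu(),
   then loop forever running kvm_cpu_exec() while the vCPU can run and
   sleeping in qemu_kvm_wait_io_event() otherwise.  */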
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);
    env->thread_id = qemu_get_thread_id();

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void tcg_exec_all(void);

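/* Thread body for TCG: a single thread drives *all* vCPUs.  It waits for
   the machine to be started, then alternates between a round-robin pass
   over the CPUs in tcg_exec_all() and idling in qemu_tcg_wait_io_event().  */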
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

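/* Force the vCPU thread out of guest execution.  On POSIX hosts this
   sends SIG_IPI to the thread; on Windows, where no signal is available,
   the thread is briefly suspended so cpu_signal() can request an exit.  */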
static void qemu_cpu_kick_thread(CPUState *env)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(env->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(env)) {
        SuspendThread(env->hThread);
        cpu_signal(0);
        ResumeThread(env->hThread);
    }
#endif
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (kvm_enabled() && !env->thread_kicked) {
        qemu_cpu_kick_thread(env);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_env);
        cpu_single_env->thread_kicked = true;
    }
#else
    abort();
#endif
}

int qemu_cpu_is_self(void *_env)
{
    CPUState *env = _env;

    return qemu_thread_is_self(env->thread);
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

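/* Request every vCPU to stop and wait on qemu_pause_cond (with the global
   mutex held) until each one has acknowledged by setting env->stopped.
   The vm_clock is disabled first so no further timers fire while the
   machine is paused.  */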
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        tcg_halt_cond = env->halt_cond;
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        env->hThread = qemu_thread_get_handle(env->thread);
#endif
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    env->stopped = 1;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(RunState state)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
void vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        vm_stop(state);
    } else {
        runstate_set(state);
    }
}

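/* Execute guest code on one vCPU.  With icount enabled, first grant the
   CPU an instruction budget no larger than the time left until the next
   vm_clock deadline (low 16 bits in icount_decr, remainder in
   icount_extra), then fold whatever was not executed back into
   qemu_icount afterwards.  */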
static int tcg_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_clock_deadline(vm_clock));
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(env)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

void set_cpu_log_filename(const char *optarg)
{
    cpu_set_log_filename(optarg);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *env;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        CpuInfoList *info;

        cpu_synchronize_state(env);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = env->cpu_index;
        info->value->current = (env == first_cpu);
        info->value->halted = env->halted;
        info->value->thread_id = env->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *env;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    for (env = first_cpu; env; env = env->next_cpu) {
        if (cpu_index == env->cpu_index) {
            break;
        }
    }

    if (env == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_memory_rw_debug(env, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu_interrupt(env, CPU_INTERRUPT_NMI);
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}