root / cpus.c @ 8e00128d
History | View | Annotate | Download (18.9 kB)
1 | 296af7c9 | Blue Swirl | /*
|
---|---|---|---|
2 | 296af7c9 | Blue Swirl | * QEMU System Emulator
|
3 | 296af7c9 | Blue Swirl | *
|
4 | 296af7c9 | Blue Swirl | * Copyright (c) 2003-2008 Fabrice Bellard
|
5 | 296af7c9 | Blue Swirl | *
|
6 | 296af7c9 | Blue Swirl | * Permission is hereby granted, free of charge, to any person obtaining a copy
|
7 | 296af7c9 | Blue Swirl | * of this software and associated documentation files (the "Software"), to deal
|
8 | 296af7c9 | Blue Swirl | * in the Software without restriction, including without limitation the rights
|
9 | 296af7c9 | Blue Swirl | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 | 296af7c9 | Blue Swirl | * copies of the Software, and to permit persons to whom the Software is
|
11 | 296af7c9 | Blue Swirl | * furnished to do so, subject to the following conditions:
|
12 | 296af7c9 | Blue Swirl | *
|
13 | 296af7c9 | Blue Swirl | * The above copyright notice and this permission notice shall be included in
|
14 | 296af7c9 | Blue Swirl | * all copies or substantial portions of the Software.
|
15 | 296af7c9 | Blue Swirl | *
|
16 | 296af7c9 | Blue Swirl | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
17 | 296af7c9 | Blue Swirl | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
18 | 296af7c9 | Blue Swirl | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
19 | 296af7c9 | Blue Swirl | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
20 | 296af7c9 | Blue Swirl | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
21 | 296af7c9 | Blue Swirl | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
22 | 296af7c9 | Blue Swirl | * THE SOFTWARE.
|
23 | 296af7c9 | Blue Swirl | */
|
24 | 296af7c9 | Blue Swirl | |
25 | 296af7c9 | Blue Swirl | /* Needed early for CONFIG_BSD etc. */
|
26 | 296af7c9 | Blue Swirl | #include "config-host.h" |
27 | 296af7c9 | Blue Swirl | |
28 | 296af7c9 | Blue Swirl | #include "monitor.h" |
29 | 296af7c9 | Blue Swirl | #include "sysemu.h" |
30 | 296af7c9 | Blue Swirl | #include "gdbstub.h" |
31 | 296af7c9 | Blue Swirl | #include "dma.h" |
32 | 296af7c9 | Blue Swirl | #include "kvm.h" |
33 | 262ea18e | Jan Kiszka | #include "exec-all.h" |
34 | 296af7c9 | Blue Swirl | |
35 | 296af7c9 | Blue Swirl | #include "cpus.h" |
36 | 296af7c9 | Blue Swirl | |
37 | 7277e027 | Blue Swirl | #ifdef SIGRTMIN
|
38 | 7277e027 | Blue Swirl | #define SIG_IPI (SIGRTMIN+4) |
39 | 7277e027 | Blue Swirl | #else
|
40 | 7277e027 | Blue Swirl | #define SIG_IPI SIGUSR1
|
41 | 7277e027 | Blue Swirl | #endif
|
42 | 7277e027 | Blue Swirl | |
43 | 296af7c9 | Blue Swirl | static CPUState *next_cpu;
|
44 | 296af7c9 | Blue Swirl | |
45 | 296af7c9 | Blue Swirl | /***********************************************************/
|
46 | 296af7c9 | Blue Swirl | void hw_error(const char *fmt, ...) |
47 | 296af7c9 | Blue Swirl | { |
48 | 296af7c9 | Blue Swirl | va_list ap; |
49 | 296af7c9 | Blue Swirl | CPUState *env; |
50 | 296af7c9 | Blue Swirl | |
51 | 296af7c9 | Blue Swirl | va_start(ap, fmt); |
52 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu: hardware error: ");
|
53 | 296af7c9 | Blue Swirl | vfprintf(stderr, fmt, ap); |
54 | 296af7c9 | Blue Swirl | fprintf(stderr, "\n");
|
55 | 296af7c9 | Blue Swirl | for(env = first_cpu; env != NULL; env = env->next_cpu) { |
56 | 296af7c9 | Blue Swirl | fprintf(stderr, "CPU #%d:\n", env->cpu_index);
|
57 | 296af7c9 | Blue Swirl | #ifdef TARGET_I386
|
58 | 296af7c9 | Blue Swirl | cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU); |
59 | 296af7c9 | Blue Swirl | #else
|
60 | 296af7c9 | Blue Swirl | cpu_dump_state(env, stderr, fprintf, 0);
|
61 | 296af7c9 | Blue Swirl | #endif
|
62 | 296af7c9 | Blue Swirl | } |
63 | 296af7c9 | Blue Swirl | va_end(ap); |
64 | 296af7c9 | Blue Swirl | abort(); |
65 | 296af7c9 | Blue Swirl | } |
66 | 296af7c9 | Blue Swirl | |
67 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_states(void) |
68 | 296af7c9 | Blue Swirl | { |
69 | 296af7c9 | Blue Swirl | CPUState *cpu; |
70 | 296af7c9 | Blue Swirl | |
71 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
72 | 296af7c9 | Blue Swirl | cpu_synchronize_state(cpu); |
73 | 296af7c9 | Blue Swirl | } |
74 | 296af7c9 | Blue Swirl | } |
75 | 296af7c9 | Blue Swirl | |
76 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_post_reset(void) |
77 | 296af7c9 | Blue Swirl | { |
78 | 296af7c9 | Blue Swirl | CPUState *cpu; |
79 | 296af7c9 | Blue Swirl | |
80 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
81 | 296af7c9 | Blue Swirl | cpu_synchronize_post_reset(cpu); |
82 | 296af7c9 | Blue Swirl | } |
83 | 296af7c9 | Blue Swirl | } |
84 | 296af7c9 | Blue Swirl | |
85 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_post_init(void) |
86 | 296af7c9 | Blue Swirl | { |
87 | 296af7c9 | Blue Swirl | CPUState *cpu; |
88 | 296af7c9 | Blue Swirl | |
89 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
90 | 296af7c9 | Blue Swirl | cpu_synchronize_post_init(cpu); |
91 | 296af7c9 | Blue Swirl | } |
92 | 296af7c9 | Blue Swirl | } |
93 | 296af7c9 | Blue Swirl | |
94 | 3ae9501c | Marcelo Tosatti | int cpu_is_stopped(CPUState *env)
|
95 | 3ae9501c | Marcelo Tosatti | { |
96 | 3ae9501c | Marcelo Tosatti | return !vm_running || env->stopped;
|
97 | 3ae9501c | Marcelo Tosatti | } |
98 | 3ae9501c | Marcelo Tosatti | |
/*
 * Transition the VM from running to stopped: disable CPU tick
 * accounting, clear the global running flag, park every vcpu, then
 * notify vm-state listeners and emit the QMP STOP event.
 * No-op if the VM is already stopped.
 */
static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        /* Clear vm_running before pausing so vcpu loops observe the stop. */
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}
109 | 296af7c9 | Blue Swirl | |
110 | 296af7c9 | Blue Swirl | static int cpu_can_run(CPUState *env) |
111 | 296af7c9 | Blue Swirl | { |
112 | 296af7c9 | Blue Swirl | if (env->stop)
|
113 | 296af7c9 | Blue Swirl | return 0; |
114 | 55274a30 | Paolo Bonzini | if (env->stopped || !vm_running)
|
115 | 296af7c9 | Blue Swirl | return 0; |
116 | 296af7c9 | Blue Swirl | return 1; |
117 | 296af7c9 | Blue Swirl | } |
118 | 296af7c9 | Blue Swirl | |
119 | 296af7c9 | Blue Swirl | static int cpu_has_work(CPUState *env) |
120 | 296af7c9 | Blue Swirl | { |
121 | 296af7c9 | Blue Swirl | if (env->stop)
|
122 | 296af7c9 | Blue Swirl | return 1; |
123 | e82bcec2 | Marcelo Tosatti | if (env->queued_work_first)
|
124 | e82bcec2 | Marcelo Tosatti | return 1; |
125 | 55274a30 | Paolo Bonzini | if (env->stopped || !vm_running)
|
126 | 296af7c9 | Blue Swirl | return 0; |
127 | 296af7c9 | Blue Swirl | if (!env->halted)
|
128 | 296af7c9 | Blue Swirl | return 1; |
129 | 296af7c9 | Blue Swirl | if (qemu_cpu_has_work(env))
|
130 | 296af7c9 | Blue Swirl | return 1; |
131 | 296af7c9 | Blue Swirl | return 0; |
132 | 296af7c9 | Blue Swirl | } |
133 | 296af7c9 | Blue Swirl | |
134 | 472fb0c4 | Jan Kiszka | static int any_cpu_has_work(void) |
135 | 296af7c9 | Blue Swirl | { |
136 | 296af7c9 | Blue Swirl | CPUState *env; |
137 | 296af7c9 | Blue Swirl | |
138 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) |
139 | 296af7c9 | Blue Swirl | if (cpu_has_work(env))
|
140 | 296af7c9 | Blue Swirl | return 1; |
141 | 296af7c9 | Blue Swirl | return 0; |
142 | 296af7c9 | Blue Swirl | } |
143 | 296af7c9 | Blue Swirl | |
/*
 * Debug-exception callback: record which CPU hit the debug event for
 * the gdb stub, then stop the VM with EXCP_DEBUG.
 */
static void cpu_debug_handler(CPUState *env)
{
    gdb_set_stop_cpu(env);
    debug_requested = EXCP_DEBUG;
    vm_stop(EXCP_DEBUG);
}
150 | 3c638d06 | Jan Kiszka | |
151 | 296af7c9 | Blue Swirl | #ifndef _WIN32
|
/* Write end of the wakeup channel; -1 until qemu_event_init() runs. */
static int io_thread_fd = -1;

/*
 * Wake the main loop by writing to the notification fd.  EAGAIN is
 * tolerated (a wakeup is already pending and unread); EINTR is retried;
 * any other write error is fatal.  No-op while the fd is unset.
 */
static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1) {
        return;
    }

    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit (1);
    }
}
174 | 296af7c9 | Blue Swirl | |
/*
 * fd handler for the wakeup channel: drain every pending notification.
 * For eventfd, only 8 bytes will be read; for a pipe, keep reading
 * while the buffer comes back completely full.
 */
static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    char scratch[512];
    ssize_t n;

    do {
        n = read(fd, scratch, sizeof(scratch));
    } while ((n == -1 && errno == EINTR) || n == sizeof(scratch));
}
186 | 296af7c9 | Blue Swirl | |
187 | 296af7c9 | Blue Swirl | static int qemu_event_init(void) |
188 | 296af7c9 | Blue Swirl | { |
189 | 296af7c9 | Blue Swirl | int err;
|
190 | 296af7c9 | Blue Swirl | int fds[2]; |
191 | 296af7c9 | Blue Swirl | |
192 | 296af7c9 | Blue Swirl | err = qemu_eventfd(fds); |
193 | 296af7c9 | Blue Swirl | if (err == -1) |
194 | 296af7c9 | Blue Swirl | return -errno;
|
195 | 296af7c9 | Blue Swirl | |
196 | 296af7c9 | Blue Swirl | err = fcntl_setfl(fds[0], O_NONBLOCK);
|
197 | 296af7c9 | Blue Swirl | if (err < 0) |
198 | 296af7c9 | Blue Swirl | goto fail;
|
199 | 296af7c9 | Blue Swirl | |
200 | 296af7c9 | Blue Swirl | err = fcntl_setfl(fds[1], O_NONBLOCK);
|
201 | 296af7c9 | Blue Swirl | if (err < 0) |
202 | 296af7c9 | Blue Swirl | goto fail;
|
203 | 296af7c9 | Blue Swirl | |
204 | 296af7c9 | Blue Swirl | qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL, |
205 | 296af7c9 | Blue Swirl | (void *)(unsigned long)fds[0]); |
206 | 296af7c9 | Blue Swirl | |
207 | 296af7c9 | Blue Swirl | io_thread_fd = fds[1];
|
208 | 296af7c9 | Blue Swirl | return 0; |
209 | 296af7c9 | Blue Swirl | |
210 | 296af7c9 | Blue Swirl | fail:
|
211 | 296af7c9 | Blue Swirl | close(fds[0]);
|
212 | 296af7c9 | Blue Swirl | close(fds[1]);
|
213 | 296af7c9 | Blue Swirl | return err;
|
214 | 296af7c9 | Blue Swirl | } |
215 | 296af7c9 | Blue Swirl | #else
|
/* Win32: auto-reset event used to wake the main loop. */
HANDLE qemu_event_handle;

/* Callback for qemu_add_wait_object(); waking up is all that is needed. */
static void dummy_event_handler(void *opaque)
{
}
221 | 296af7c9 | Blue Swirl | |
222 | 296af7c9 | Blue Swirl | static int qemu_event_init(void) |
223 | 296af7c9 | Blue Swirl | { |
224 | 296af7c9 | Blue Swirl | qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL); |
225 | 296af7c9 | Blue Swirl | if (!qemu_event_handle) {
|
226 | 296af7c9 | Blue Swirl | fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
|
227 | 296af7c9 | Blue Swirl | return -1; |
228 | 296af7c9 | Blue Swirl | } |
229 | 296af7c9 | Blue Swirl | qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
|
230 | 296af7c9 | Blue Swirl | return 0; |
231 | 296af7c9 | Blue Swirl | } |
232 | 296af7c9 | Blue Swirl | |
233 | 296af7c9 | Blue Swirl | static void qemu_event_increment(void) |
234 | 296af7c9 | Blue Swirl | { |
235 | 296af7c9 | Blue Swirl | if (!SetEvent(qemu_event_handle)) {
|
236 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
|
237 | 296af7c9 | Blue Swirl | GetLastError()); |
238 | 296af7c9 | Blue Swirl | exit (1);
|
239 | 296af7c9 | Blue Swirl | } |
240 | 296af7c9 | Blue Swirl | } |
241 | 296af7c9 | Blue Swirl | #endif
|
242 | 296af7c9 | Blue Swirl | |
243 | 296af7c9 | Blue Swirl | #ifndef CONFIG_IOTHREAD
|
/*
 * Single-threaded (non-IOTHREAD) main loop setup: install the debug
 * exception handler and create the event notification mechanism.
 * Returns the result of qemu_event_init() (0 on success).
 */
int qemu_init_main_loop(void)
{
    cpu_set_debug_excp_handler(cpu_debug_handler);

    return qemu_event_init();
}
250 | 296af7c9 | Blue Swirl | |
/* Nothing to do without the I/O thread: vcpus run in the main thread,
 * so there is nobody to release after machine init. */
void qemu_main_loop_start(void)
{
}
254 | 7277e027 | Blue Swirl | |
255 | 296af7c9 | Blue Swirl | void qemu_init_vcpu(void *_env) |
256 | 296af7c9 | Blue Swirl | { |
257 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
258 | 296af7c9 | Blue Swirl | |
259 | 296af7c9 | Blue Swirl | env->nr_cores = smp_cores; |
260 | 296af7c9 | Blue Swirl | env->nr_threads = smp_threads; |
261 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
262 | 296af7c9 | Blue Swirl | kvm_init_vcpu(env); |
263 | 296af7c9 | Blue Swirl | return;
|
264 | 296af7c9 | Blue Swirl | } |
265 | 296af7c9 | Blue Swirl | |
/* Without the I/O thread every CPU runs in the caller's (only) thread,
 * so the answer is always "yes". */
int qemu_cpu_self(void *env)
{
    return 1;
}
270 | 296af7c9 | Blue Swirl | |
/* Single-threaded build: run the work item synchronously, right here;
 * env is irrelevant since there is only one thread. */
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}
275 | e82bcec2 | Marcelo Tosatti | |
/* No vcpu threads to resume in the single-threaded build. */
void resume_all_vcpus(void)
{
}
279 | 296af7c9 | Blue Swirl | |
/* No vcpu threads to pause in the single-threaded build. */
void pause_all_vcpus(void)
{
}
283 | 296af7c9 | Blue Swirl | |
/*
 * No-op without the I/O thread: there is no separate vcpu thread to
 * kick.  (A dead bare "return;" was removed.)
 */
void qemu_cpu_kick(void *env)
{
}
288 | 296af7c9 | Blue Swirl | |
/*
 * Wake the main loop and force the currently executing CPU — and the
 * scheduler's next CPU, if different — out of the execution loop so
 * pending events are serviced promptly.
 */
void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment ();
    if (env) {
        cpu_exit(env);
    }
    /* Also kick the round-robin scheduler's next CPU, if any. */
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
}
301 | 296af7c9 | Blue Swirl | |
/* No I/O thread: there is no global lock to take or release. */
void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}
304 | 296af7c9 | Blue Swirl | |
/* Single-threaded build: stopping needs no cross-thread signalling,
 * so just perform the stop sequence directly. */
void vm_stop(int reason)
{
    do_vm_stop(reason);
}
309 | 296af7c9 | Blue Swirl | |
310 | 296af7c9 | Blue Swirl | #else /* CONFIG_IOTHREAD */ |
311 | 296af7c9 | Blue Swirl | |
312 | 296af7c9 | Blue Swirl | #include "qemu-thread.h" |
313 | 296af7c9 | Blue Swirl | |
314 | 296af7c9 | Blue Swirl | QemuMutex qemu_global_mutex; |
315 | 296af7c9 | Blue Swirl | static QemuMutex qemu_fair_mutex;
|
316 | 296af7c9 | Blue Swirl | |
317 | 296af7c9 | Blue Swirl | static QemuThread io_thread;
|
318 | 296af7c9 | Blue Swirl | |
319 | 296af7c9 | Blue Swirl | static QemuThread *tcg_cpu_thread;
|
320 | 296af7c9 | Blue Swirl | static QemuCond *tcg_halt_cond;
|
321 | 296af7c9 | Blue Swirl | |
322 | 296af7c9 | Blue Swirl | static int qemu_system_ready; |
323 | 296af7c9 | Blue Swirl | /* cpu creation */
|
324 | 296af7c9 | Blue Swirl | static QemuCond qemu_cpu_cond;
|
325 | 296af7c9 | Blue Swirl | /* system init */
|
326 | 296af7c9 | Blue Swirl | static QemuCond qemu_system_cond;
|
327 | 296af7c9 | Blue Swirl | static QemuCond qemu_pause_cond;
|
328 | e82bcec2 | Marcelo Tosatti | static QemuCond qemu_work_cond;
|
329 | 296af7c9 | Blue Swirl | |
330 | 55541c8a | Paolo Bonzini | static void tcg_init_ipi(void); |
331 | 55541c8a | Paolo Bonzini | static void kvm_init_ipi(CPUState *env); |
332 | 296af7c9 | Blue Swirl | static void unblock_io_signals(void); |
333 | 296af7c9 | Blue Swirl | |
/*
 * IOTHREAD main loop setup: install the debug exception handler, create
 * the event notification mechanism, initialize the global
 * synchronization primitives, and take the global mutex on behalf of
 * the I/O thread.  Returns 0 on success, or the nonzero error from
 * qemu_event_init().
 */
int qemu_init_main_loop(void)
{
    int ret;

    cpu_set_debug_excp_handler(cpu_debug_handler);

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    /* The I/O thread holds the global mutex whenever it is not waiting. */
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}
355 | 296af7c9 | Blue Swirl | |
/* Called once machine init is done: release the vcpu threads waiting
 * on qemu_system_cond in the thread functions below. */
void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}
361 | 7277e027 | Blue Swirl | |
/*
 * Run func(data) on env's vcpu thread and wait for completion; if the
 * caller already is that thread, run it immediately.  Called with the
 * global mutex held — qemu_cond_wait() drops it while the target
 * thread drains its queue in flush_queued_work().
 */
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_self(env)) {
        func(data);
        return;
    }

    /* The work item lives on this stack frame; that is safe because we
     * block below until the target thread sets wi.done. */
    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first)
        env->queued_work_first = &wi;
    else
        env->queued_work_last->next = &wi;
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        /* cpu_single_env may be clobbered while we sleep; restore it so
         * the caller still sees its own CPU afterwards. */
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
389 | e82bcec2 | Marcelo Tosatti | |
390 | e82bcec2 | Marcelo Tosatti | static void flush_queued_work(CPUState *env) |
391 | e82bcec2 | Marcelo Tosatti | { |
392 | e82bcec2 | Marcelo Tosatti | struct qemu_work_item *wi;
|
393 | e82bcec2 | Marcelo Tosatti | |
394 | e82bcec2 | Marcelo Tosatti | if (!env->queued_work_first)
|
395 | e82bcec2 | Marcelo Tosatti | return;
|
396 | e82bcec2 | Marcelo Tosatti | |
397 | e82bcec2 | Marcelo Tosatti | while ((wi = env->queued_work_first)) {
|
398 | e82bcec2 | Marcelo Tosatti | env->queued_work_first = wi->next; |
399 | e82bcec2 | Marcelo Tosatti | wi->func(wi->data); |
400 | e82bcec2 | Marcelo Tosatti | wi->done = true;
|
401 | e82bcec2 | Marcelo Tosatti | } |
402 | e82bcec2 | Marcelo Tosatti | env->queued_work_last = NULL;
|
403 | e82bcec2 | Marcelo Tosatti | qemu_cond_broadcast(&qemu_work_cond); |
404 | e82bcec2 | Marcelo Tosatti | } |
405 | e82bcec2 | Marcelo Tosatti | |
/*
 * Per-CPU bookkeeping shared by the KVM and TCG wait paths: complete a
 * pending stop request (signalling pause_all_vcpus()) and run any
 * queued run_on_cpu() work items.
 */
static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
}
415 | 296af7c9 | Blue Swirl | |
/*
 * TCG thread idle path: sleep until some CPU has work, briefly release
 * the global mutex so the I/O thread can get in, then process
 * stop/work requests for every CPU (one TCG thread serves them all).
 */
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (!any_cpu_has_work())
        qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
439 | 296af7c9 | Blue Swirl | |
440 | 296af7c9 | Blue Swirl | static void qemu_kvm_eat_signal(CPUState *env, int timeout) |
441 | 296af7c9 | Blue Swirl | { |
442 | 296af7c9 | Blue Swirl | struct timespec ts;
|
443 | 296af7c9 | Blue Swirl | int r, e;
|
444 | 296af7c9 | Blue Swirl | siginfo_t siginfo; |
445 | 296af7c9 | Blue Swirl | sigset_t waitset; |
446 | 296af7c9 | Blue Swirl | |
447 | 296af7c9 | Blue Swirl | ts.tv_sec = timeout / 1000;
|
448 | 296af7c9 | Blue Swirl | ts.tv_nsec = (timeout % 1000) * 1000000; |
449 | 296af7c9 | Blue Swirl | |
450 | 296af7c9 | Blue Swirl | sigemptyset(&waitset); |
451 | 296af7c9 | Blue Swirl | sigaddset(&waitset, SIG_IPI); |
452 | 296af7c9 | Blue Swirl | |
453 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_global_mutex); |
454 | 296af7c9 | Blue Swirl | r = sigtimedwait(&waitset, &siginfo, &ts); |
455 | 296af7c9 | Blue Swirl | e = errno; |
456 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
457 | 296af7c9 | Blue Swirl | |
458 | 296af7c9 | Blue Swirl | if (r == -1 && !(e == EAGAIN || e == EINTR)) { |
459 | 296af7c9 | Blue Swirl | fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
|
460 | 296af7c9 | Blue Swirl | exit(1);
|
461 | 296af7c9 | Blue Swirl | } |
462 | 296af7c9 | Blue Swirl | } |
463 | 296af7c9 | Blue Swirl | |
/* KVM vcpu idle path: wait until this CPU has work, consume any
 * pending SIG_IPI, then handle stop/work requests for it. */
static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (!cpu_has_work(env))
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_kvm_eat_signal(env, 0);
    qemu_wait_io_event_common(env);
}
472 | 296af7c9 | Blue Swirl | |
473 | 296af7c9 | Blue Swirl | static int qemu_cpu_exec(CPUState *env); |
474 | 296af7c9 | Blue Swirl | |
/*
 * Entry point of a per-CPU KVM vcpu thread: create the vcpu and its
 * SIG_IPI handling, announce creation to qemu_init_main_loop()'s
 * waiter, wait for machine init to finish, then loop forever between
 * guest execution and event handling.  Runs with the global mutex held
 * except inside the wait primitives.
 */
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_self(env->thread);
    if (kvm_enabled())
        kvm_init_vcpu(env);

    kvm_init_ipi(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    /* Not reached. */
    return NULL;
}
502 | 296af7c9 | Blue Swirl | |
/*
 * Entry point of the single TCG thread, which serves all CPUs:
 * install the SIG_IPI handler, mark every CPU created, wait for
 * machine init to finish, then alternate between cpu_exec_all() and
 * event handling forever.
 */
static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    tcg_init_ipi();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        cpu_exec_all();
        qemu_tcg_wait_io_event();
    }

    /* Not reached. */
    return NULL;
}
527 | 296af7c9 | Blue Swirl | |
/* Wake env's vcpu thread: broadcast its halt condition (in case it is
 * sleeping) and send SIG_IPI (in case it is executing guest code). */
void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;
    qemu_cond_broadcast(env->halt_cond);
    qemu_thread_signal(env->thread, SIG_IPI);
}
534 | 296af7c9 | Blue Swirl | |
535 | 296af7c9 | Blue Swirl | int qemu_cpu_self(void *_env) |
536 | 296af7c9 | Blue Swirl | { |
537 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
538 | 296af7c9 | Blue Swirl | QemuThread this; |
539 | 296af7c9 | Blue Swirl | |
540 | 296af7c9 | Blue Swirl | qemu_thread_self(&this); |
541 | 296af7c9 | Blue Swirl | |
542 | 296af7c9 | Blue Swirl | return qemu_thread_equal(&this, env->thread);
|
543 | 296af7c9 | Blue Swirl | } |
544 | 296af7c9 | Blue Swirl | |
/* SIG_IPI handler for the TCG thread: request exit from the current
 * CPU's execution loop.  Runs in async-signal context — keep minimal. */
static void cpu_signal(int sig)
{
    if (cpu_single_env)
        cpu_exit(cpu_single_env);
    exit_request = 1;
}
551 | 296af7c9 | Blue Swirl | |
552 | 55541c8a | Paolo Bonzini | static void tcg_init_ipi(void) |
553 | 296af7c9 | Blue Swirl | { |
554 | 296af7c9 | Blue Swirl | sigset_t set; |
555 | 296af7c9 | Blue Swirl | struct sigaction sigact;
|
556 | 296af7c9 | Blue Swirl | |
557 | 55541c8a | Paolo Bonzini | memset(&sigact, 0, sizeof(sigact)); |
558 | 55541c8a | Paolo Bonzini | sigact.sa_handler = cpu_signal; |
559 | 55541c8a | Paolo Bonzini | sigaction(SIG_IPI, &sigact, NULL);
|
560 | 296af7c9 | Blue Swirl | |
561 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
562 | 296af7c9 | Blue Swirl | sigaddset(&set, SIG_IPI); |
563 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_UNBLOCK, &set, NULL);
|
564 | 296af7c9 | Blue Swirl | } |
565 | 296af7c9 | Blue Swirl | |
/* SIG_IPI handler for KVM vcpu threads: the delivery alone is enough
 * to interrupt the thread; no action is required. */
static void dummy_signal(int sig)
{
}
569 | 296af7c9 | Blue Swirl | |
/*
 * SIG_IPI setup for a KVM vcpu thread: install a no-op handler, then
 * derive the vcpu's signal mask from the thread's current mask with
 * SIG_IPI removed and hand it to the kernel via kvm_set_signal_mask(),
 * so kicks can interrupt the vcpu.  Failure to set the mask is fatal.
 */
static void kvm_init_ipi(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    /* SIG_BLOCK with a NULL set only queries the current mask. */
    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
        exit(1);
    }
}
588 | 296af7c9 | Blue Swirl | |
589 | 296af7c9 | Blue Swirl | static void unblock_io_signals(void) |
590 | 296af7c9 | Blue Swirl | { |
591 | 296af7c9 | Blue Swirl | sigset_t set; |
592 | 296af7c9 | Blue Swirl | |
593 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
594 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGUSR2); |
595 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGIO); |
596 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGALRM); |
597 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_UNBLOCK, &set, NULL);
|
598 | 296af7c9 | Blue Swirl | |
599 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
600 | 296af7c9 | Blue Swirl | sigaddset(&set, SIG_IPI); |
601 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_BLOCK, &set, NULL);
|
602 | 296af7c9 | Blue Swirl | } |
603 | 296af7c9 | Blue Swirl | |
/*
 * Acquire the global mutex from the I/O thread.  The fair mutex keeps
 * vcpu threads from starving us (see qemu_tcg_wait_io_event).  In the
 * TCG case, a failed trylock additionally kicks the TCG thread with
 * SIG_IPI so it leaves the execution loop and releases the lock.
 */
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}
619 | 296af7c9 | Blue Swirl | |
/* Release the global mutex taken by qemu_mutex_lock_iothread(). */
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
624 | 296af7c9 | Blue Swirl | |
625 | 296af7c9 | Blue Swirl | static int all_vcpus_paused(void) |
626 | 296af7c9 | Blue Swirl | { |
627 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
628 | 296af7c9 | Blue Swirl | |
629 | 296af7c9 | Blue Swirl | while (penv) {
|
630 | 296af7c9 | Blue Swirl | if (!penv->stopped)
|
631 | 296af7c9 | Blue Swirl | return 0; |
632 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
633 | 296af7c9 | Blue Swirl | } |
634 | 296af7c9 | Blue Swirl | |
635 | 296af7c9 | Blue Swirl | return 1; |
636 | 296af7c9 | Blue Swirl | } |
637 | 296af7c9 | Blue Swirl | |
638 | 296af7c9 | Blue Swirl | void pause_all_vcpus(void) |
639 | 296af7c9 | Blue Swirl | { |
640 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
641 | 296af7c9 | Blue Swirl | |
642 | 296af7c9 | Blue Swirl | while (penv) {
|
643 | 296af7c9 | Blue Swirl | penv->stop = 1;
|
644 | 296af7c9 | Blue Swirl | qemu_cpu_kick(penv); |
645 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
646 | 296af7c9 | Blue Swirl | } |
647 | 296af7c9 | Blue Swirl | |
648 | 296af7c9 | Blue Swirl | while (!all_vcpus_paused()) {
|
649 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
|
650 | 296af7c9 | Blue Swirl | penv = first_cpu; |
651 | 296af7c9 | Blue Swirl | while (penv) {
|
652 | 1fbb22e5 | Marcelo Tosatti | qemu_cpu_kick(penv); |
653 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
654 | 296af7c9 | Blue Swirl | } |
655 | 296af7c9 | Blue Swirl | } |
656 | 296af7c9 | Blue Swirl | } |
657 | 296af7c9 | Blue Swirl | |
658 | 296af7c9 | Blue Swirl | void resume_all_vcpus(void) |
659 | 296af7c9 | Blue Swirl | { |
660 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
661 | 296af7c9 | Blue Swirl | |
662 | 296af7c9 | Blue Swirl | while (penv) {
|
663 | 296af7c9 | Blue Swirl | penv->stop = 0;
|
664 | 296af7c9 | Blue Swirl | penv->stopped = 0;
|
665 | 296af7c9 | Blue Swirl | qemu_cpu_kick(penv); |
666 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
667 | 296af7c9 | Blue Swirl | } |
668 | 296af7c9 | Blue Swirl | } |
669 | 296af7c9 | Blue Swirl | |
670 | 296af7c9 | Blue Swirl | static void tcg_init_vcpu(void *_env) |
671 | 296af7c9 | Blue Swirl | { |
672 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
673 | 296af7c9 | Blue Swirl | /* share a single thread for all cpus with TCG */
|
674 | 296af7c9 | Blue Swirl | if (!tcg_cpu_thread) {
|
675 | 296af7c9 | Blue Swirl | env->thread = qemu_mallocz(sizeof(QemuThread));
|
676 | 296af7c9 | Blue Swirl | env->halt_cond = qemu_mallocz(sizeof(QemuCond));
|
677 | 296af7c9 | Blue Swirl | qemu_cond_init(env->halt_cond); |
678 | 296af7c9 | Blue Swirl | qemu_thread_create(env->thread, tcg_cpu_thread_fn, env); |
679 | 296af7c9 | Blue Swirl | while (env->created == 0) |
680 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
|
681 | 296af7c9 | Blue Swirl | tcg_cpu_thread = env->thread; |
682 | 296af7c9 | Blue Swirl | tcg_halt_cond = env->halt_cond; |
683 | 296af7c9 | Blue Swirl | } else {
|
684 | 296af7c9 | Blue Swirl | env->thread = tcg_cpu_thread; |
685 | 296af7c9 | Blue Swirl | env->halt_cond = tcg_halt_cond; |
686 | 296af7c9 | Blue Swirl | } |
687 | 296af7c9 | Blue Swirl | } |
688 | 296af7c9 | Blue Swirl | |
689 | 296af7c9 | Blue Swirl | static void kvm_start_vcpu(CPUState *env) |
690 | 296af7c9 | Blue Swirl | { |
691 | 296af7c9 | Blue Swirl | env->thread = qemu_mallocz(sizeof(QemuThread));
|
692 | 296af7c9 | Blue Swirl | env->halt_cond = qemu_mallocz(sizeof(QemuCond));
|
693 | 296af7c9 | Blue Swirl | qemu_cond_init(env->halt_cond); |
694 | 296af7c9 | Blue Swirl | qemu_thread_create(env->thread, kvm_cpu_thread_fn, env); |
695 | 296af7c9 | Blue Swirl | while (env->created == 0) |
696 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
|
697 | 296af7c9 | Blue Swirl | } |
698 | 296af7c9 | Blue Swirl | |
699 | 296af7c9 | Blue Swirl | void qemu_init_vcpu(void *_env) |
700 | 296af7c9 | Blue Swirl | { |
701 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
702 | 296af7c9 | Blue Swirl | |
703 | 296af7c9 | Blue Swirl | env->nr_cores = smp_cores; |
704 | 296af7c9 | Blue Swirl | env->nr_threads = smp_threads; |
705 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
706 | 296af7c9 | Blue Swirl | kvm_start_vcpu(env); |
707 | 296af7c9 | Blue Swirl | else
|
708 | 296af7c9 | Blue Swirl | tcg_init_vcpu(env); |
709 | 296af7c9 | Blue Swirl | } |
710 | 296af7c9 | Blue Swirl | |
/* Notify the main loop that an event is pending; delegates to
 * qemu_event_increment(). */
void qemu_notify_event(void)
{
    qemu_event_increment();
}
715 | 296af7c9 | Blue Swirl | |
/* Record @reason as the pending vmstop request and wake the main loop
 * so it can process the request. */
static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}
721 | 296af7c9 | Blue Swirl | |
/*
 * Stop the VM for @reason.
 *
 * If called from the I/O thread, performs the stop directly via
 * do_vm_stop().  From any other thread, only posts a vmstop request and
 * forces the currently executing CPU (if any) out of guest code; the
 * I/O thread completes the stop asynchronously.
 */
void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        if (cpu_single_env) {
            /* Break out of the current CPU and mark it to stop. */
            cpu_exit(cpu_single_env);
            cpu_single_env->stop = 1;
        }
        return;
    }
    do_vm_stop(reason);
}
741 | 296af7c9 | Blue Swirl | |
742 | 296af7c9 | Blue Swirl | #endif
|
743 | 296af7c9 | Blue Swirl | |
744 | 296af7c9 | Blue Swirl | static int qemu_cpu_exec(CPUState *env) |
745 | 296af7c9 | Blue Swirl | { |
746 | 296af7c9 | Blue Swirl | int ret;
|
747 | 296af7c9 | Blue Swirl | #ifdef CONFIG_PROFILER
|
748 | 296af7c9 | Blue Swirl | int64_t ti; |
749 | 296af7c9 | Blue Swirl | #endif
|
750 | 296af7c9 | Blue Swirl | |
751 | 296af7c9 | Blue Swirl | #ifdef CONFIG_PROFILER
|
752 | 296af7c9 | Blue Swirl | ti = profile_getclock(); |
753 | 296af7c9 | Blue Swirl | #endif
|
754 | 296af7c9 | Blue Swirl | if (use_icount) {
|
755 | 296af7c9 | Blue Swirl | int64_t count; |
756 | 296af7c9 | Blue Swirl | int decr;
|
757 | 296af7c9 | Blue Swirl | qemu_icount -= (env->icount_decr.u16.low + env->icount_extra); |
758 | 296af7c9 | Blue Swirl | env->icount_decr.u16.low = 0;
|
759 | 296af7c9 | Blue Swirl | env->icount_extra = 0;
|
760 | 296af7c9 | Blue Swirl | count = qemu_icount_round (qemu_next_deadline()); |
761 | 296af7c9 | Blue Swirl | qemu_icount += count; |
762 | 296af7c9 | Blue Swirl | decr = (count > 0xffff) ? 0xffff : count; |
763 | 296af7c9 | Blue Swirl | count -= decr; |
764 | 296af7c9 | Blue Swirl | env->icount_decr.u16.low = decr; |
765 | 296af7c9 | Blue Swirl | env->icount_extra = count; |
766 | 296af7c9 | Blue Swirl | } |
767 | 296af7c9 | Blue Swirl | ret = cpu_exec(env); |
768 | 296af7c9 | Blue Swirl | #ifdef CONFIG_PROFILER
|
769 | 296af7c9 | Blue Swirl | qemu_time += profile_getclock() - ti; |
770 | 296af7c9 | Blue Swirl | #endif
|
771 | 296af7c9 | Blue Swirl | if (use_icount) {
|
772 | 296af7c9 | Blue Swirl | /* Fold pending instructions back into the
|
773 | 296af7c9 | Blue Swirl | instruction counter, and clear the interrupt flag. */
|
774 | 296af7c9 | Blue Swirl | qemu_icount -= (env->icount_decr.u16.low |
775 | 296af7c9 | Blue Swirl | + env->icount_extra); |
776 | 296af7c9 | Blue Swirl | env->icount_decr.u32 = 0;
|
777 | 296af7c9 | Blue Swirl | env->icount_extra = 0;
|
778 | 296af7c9 | Blue Swirl | } |
779 | 296af7c9 | Blue Swirl | return ret;
|
780 | 296af7c9 | Blue Swirl | } |
781 | 296af7c9 | Blue Swirl | |
782 | 472fb0c4 | Jan Kiszka | bool cpu_exec_all(void) |
783 | 296af7c9 | Blue Swirl | { |
784 | 296af7c9 | Blue Swirl | if (next_cpu == NULL) |
785 | 296af7c9 | Blue Swirl | next_cpu = first_cpu; |
786 | c629a4bc | Jan Kiszka | for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) { |
787 | 345f4426 | Jan Kiszka | CPUState *env = next_cpu; |
788 | 296af7c9 | Blue Swirl | |
789 | 296af7c9 | Blue Swirl | qemu_clock_enable(vm_clock, |
790 | 345f4426 | Jan Kiszka | (env->singlestep_enabled & SSTEP_NOTIMER) == 0);
|
791 | 296af7c9 | Blue Swirl | |
792 | 296af7c9 | Blue Swirl | if (qemu_alarm_pending())
|
793 | 296af7c9 | Blue Swirl | break;
|
794 | 3c638d06 | Jan Kiszka | if (cpu_can_run(env)) {
|
795 | 3c638d06 | Jan Kiszka | if (qemu_cpu_exec(env) == EXCP_DEBUG) {
|
796 | 3c638d06 | Jan Kiszka | break;
|
797 | 3c638d06 | Jan Kiszka | } |
798 | 3c638d06 | Jan Kiszka | } else if (env->stop) { |
799 | 296af7c9 | Blue Swirl | break;
|
800 | 296af7c9 | Blue Swirl | } |
801 | 296af7c9 | Blue Swirl | } |
802 | c629a4bc | Jan Kiszka | exit_request = 0;
|
803 | 472fb0c4 | Jan Kiszka | return any_cpu_has_work();
|
804 | 296af7c9 | Blue Swirl | } |
805 | 296af7c9 | Blue Swirl | |
806 | 296af7c9 | Blue Swirl | void set_numa_modes(void) |
807 | 296af7c9 | Blue Swirl | { |
808 | 296af7c9 | Blue Swirl | CPUState *env; |
809 | 296af7c9 | Blue Swirl | int i;
|
810 | 296af7c9 | Blue Swirl | |
811 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) { |
812 | 296af7c9 | Blue Swirl | for (i = 0; i < nb_numa_nodes; i++) { |
813 | 296af7c9 | Blue Swirl | if (node_cpumask[i] & (1 << env->cpu_index)) { |
814 | 296af7c9 | Blue Swirl | env->numa_node = i; |
815 | 296af7c9 | Blue Swirl | } |
816 | 296af7c9 | Blue Swirl | } |
817 | 296af7c9 | Blue Swirl | } |
818 | 296af7c9 | Blue Swirl | } |
819 | 296af7c9 | Blue Swirl | |
820 | 296af7c9 | Blue Swirl | void set_cpu_log(const char *optarg) |
821 | 296af7c9 | Blue Swirl | { |
822 | 296af7c9 | Blue Swirl | int mask;
|
823 | 296af7c9 | Blue Swirl | const CPULogItem *item;
|
824 | 296af7c9 | Blue Swirl | |
825 | 296af7c9 | Blue Swirl | mask = cpu_str_to_log_mask(optarg); |
826 | 296af7c9 | Blue Swirl | if (!mask) {
|
827 | 296af7c9 | Blue Swirl | printf("Log items (comma separated):\n");
|
828 | 296af7c9 | Blue Swirl | for (item = cpu_log_items; item->mask != 0; item++) { |
829 | 296af7c9 | Blue Swirl | printf("%-10s %s\n", item->name, item->help);
|
830 | 296af7c9 | Blue Swirl | } |
831 | 296af7c9 | Blue Swirl | exit(1);
|
832 | 296af7c9 | Blue Swirl | } |
833 | 296af7c9 | Blue Swirl | cpu_set_log(mask); |
834 | 296af7c9 | Blue Swirl | } |
835 | 29e922b6 | Blue Swirl | |
836 | 29e922b6 | Blue Swirl | /* Return the virtual CPU time, based on the instruction counter. */
|
837 | 29e922b6 | Blue Swirl | int64_t cpu_get_icount(void)
|
838 | 29e922b6 | Blue Swirl | { |
839 | 29e922b6 | Blue Swirl | int64_t icount; |
840 | 29e922b6 | Blue Swirl | CPUState *env = cpu_single_env;; |
841 | 29e922b6 | Blue Swirl | |
842 | 29e922b6 | Blue Swirl | icount = qemu_icount; |
843 | 29e922b6 | Blue Swirl | if (env) {
|
844 | 29e922b6 | Blue Swirl | if (!can_do_io(env)) {
|
845 | 29e922b6 | Blue Swirl | fprintf(stderr, "Bad clock read\n");
|
846 | 29e922b6 | Blue Swirl | } |
847 | 29e922b6 | Blue Swirl | icount -= (env->icount_decr.u16.low + env->icount_extra); |
848 | 29e922b6 | Blue Swirl | } |
849 | 29e922b6 | Blue Swirl | return qemu_icount_bias + (icount << icount_time_shift);
|
850 | 29e922b6 | Blue Swirl | } |
851 | 262353cb | Blue Swirl | |
/*
 * Print the target's supported CPU models to @f using @cpu_fprintf.
 * Delegates to the target-provided cpu_list_id() or cpu_list() macro
 * when one is defined; targets defining neither produce no output.
 */
void list_cpus(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
               const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}