root / cpus.c @ a8486bc9
History | View | Annotate | Download (20.5 kB)
1 | 296af7c9 | Blue Swirl | /*
|
---|---|---|---|
2 | 296af7c9 | Blue Swirl | * QEMU System Emulator
|
3 | 296af7c9 | Blue Swirl | *
|
4 | 296af7c9 | Blue Swirl | * Copyright (c) 2003-2008 Fabrice Bellard
|
5 | 296af7c9 | Blue Swirl | *
|
6 | 296af7c9 | Blue Swirl | * Permission is hereby granted, free of charge, to any person obtaining a copy
|
7 | 296af7c9 | Blue Swirl | * of this software and associated documentation files (the "Software"), to deal
|
8 | 296af7c9 | Blue Swirl | * in the Software without restriction, including without limitation the rights
|
9 | 296af7c9 | Blue Swirl | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 | 296af7c9 | Blue Swirl | * copies of the Software, and to permit persons to whom the Software is
|
11 | 296af7c9 | Blue Swirl | * furnished to do so, subject to the following conditions:
|
12 | 296af7c9 | Blue Swirl | *
|
13 | 296af7c9 | Blue Swirl | * The above copyright notice and this permission notice shall be included in
|
14 | 296af7c9 | Blue Swirl | * all copies or substantial portions of the Software.
|
15 | 296af7c9 | Blue Swirl | *
|
16 | 296af7c9 | Blue Swirl | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
17 | 296af7c9 | Blue Swirl | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
18 | 296af7c9 | Blue Swirl | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
19 | 296af7c9 | Blue Swirl | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
20 | 296af7c9 | Blue Swirl | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
21 | 296af7c9 | Blue Swirl | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
22 | 296af7c9 | Blue Swirl | * THE SOFTWARE.
|
23 | 296af7c9 | Blue Swirl | */
|
24 | 296af7c9 | Blue Swirl | |
25 | 296af7c9 | Blue Swirl | /* Needed early for CONFIG_BSD etc. */
|
26 | 296af7c9 | Blue Swirl | #include "config-host.h" |
27 | 296af7c9 | Blue Swirl | |
28 | 296af7c9 | Blue Swirl | #include "monitor.h" |
29 | 296af7c9 | Blue Swirl | #include "sysemu.h" |
30 | 296af7c9 | Blue Swirl | #include "gdbstub.h" |
31 | 296af7c9 | Blue Swirl | #include "dma.h" |
32 | 296af7c9 | Blue Swirl | #include "kvm.h" |
33 | 262ea18e | Jan Kiszka | #include "exec-all.h" |
34 | 296af7c9 | Blue Swirl | |
35 | 296af7c9 | Blue Swirl | #include "cpus.h" |
36 | a8486bc9 | Marcelo Tosatti | #include "compatfd.h" |
37 | 296af7c9 | Blue Swirl | |
38 | 7277e027 | Blue Swirl | #ifdef SIGRTMIN
|
39 | 7277e027 | Blue Swirl | #define SIG_IPI (SIGRTMIN+4) |
40 | 7277e027 | Blue Swirl | #else
|
41 | 7277e027 | Blue Swirl | #define SIG_IPI SIGUSR1
|
42 | 7277e027 | Blue Swirl | #endif
|
43 | 7277e027 | Blue Swirl | |
44 | 296af7c9 | Blue Swirl | static CPUState *next_cpu;
|
45 | 296af7c9 | Blue Swirl | |
46 | 296af7c9 | Blue Swirl | /***********************************************************/
|
47 | 296af7c9 | Blue Swirl | void hw_error(const char *fmt, ...) |
48 | 296af7c9 | Blue Swirl | { |
49 | 296af7c9 | Blue Swirl | va_list ap; |
50 | 296af7c9 | Blue Swirl | CPUState *env; |
51 | 296af7c9 | Blue Swirl | |
52 | 296af7c9 | Blue Swirl | va_start(ap, fmt); |
53 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu: hardware error: ");
|
54 | 296af7c9 | Blue Swirl | vfprintf(stderr, fmt, ap); |
55 | 296af7c9 | Blue Swirl | fprintf(stderr, "\n");
|
56 | 296af7c9 | Blue Swirl | for(env = first_cpu; env != NULL; env = env->next_cpu) { |
57 | 296af7c9 | Blue Swirl | fprintf(stderr, "CPU #%d:\n", env->cpu_index);
|
58 | 296af7c9 | Blue Swirl | #ifdef TARGET_I386
|
59 | 296af7c9 | Blue Swirl | cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU); |
60 | 296af7c9 | Blue Swirl | #else
|
61 | 296af7c9 | Blue Swirl | cpu_dump_state(env, stderr, fprintf, 0);
|
62 | 296af7c9 | Blue Swirl | #endif
|
63 | 296af7c9 | Blue Swirl | } |
64 | 296af7c9 | Blue Swirl | va_end(ap); |
65 | 296af7c9 | Blue Swirl | abort(); |
66 | 296af7c9 | Blue Swirl | } |
67 | 296af7c9 | Blue Swirl | |
68 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_states(void) |
69 | 296af7c9 | Blue Swirl | { |
70 | 296af7c9 | Blue Swirl | CPUState *cpu; |
71 | 296af7c9 | Blue Swirl | |
72 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
73 | 296af7c9 | Blue Swirl | cpu_synchronize_state(cpu); |
74 | 296af7c9 | Blue Swirl | } |
75 | 296af7c9 | Blue Swirl | } |
76 | 296af7c9 | Blue Swirl | |
77 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_post_reset(void) |
78 | 296af7c9 | Blue Swirl | { |
79 | 296af7c9 | Blue Swirl | CPUState *cpu; |
80 | 296af7c9 | Blue Swirl | |
81 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
82 | 296af7c9 | Blue Swirl | cpu_synchronize_post_reset(cpu); |
83 | 296af7c9 | Blue Swirl | } |
84 | 296af7c9 | Blue Swirl | } |
85 | 296af7c9 | Blue Swirl | |
86 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_post_init(void) |
87 | 296af7c9 | Blue Swirl | { |
88 | 296af7c9 | Blue Swirl | CPUState *cpu; |
89 | 296af7c9 | Blue Swirl | |
90 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
91 | 296af7c9 | Blue Swirl | cpu_synchronize_post_init(cpu); |
92 | 296af7c9 | Blue Swirl | } |
93 | 296af7c9 | Blue Swirl | } |
94 | 296af7c9 | Blue Swirl | |
95 | 3ae9501c | Marcelo Tosatti | int cpu_is_stopped(CPUState *env)
|
96 | 3ae9501c | Marcelo Tosatti | { |
97 | 3ae9501c | Marcelo Tosatti | return !vm_running || env->stopped;
|
98 | 3ae9501c | Marcelo Tosatti | } |
99 | 3ae9501c | Marcelo Tosatti | |
/*
 * Common VM-stop path: stop the virtual clock, halt all vCPUs and notify
 * listeners.  'reason' is the stop reason forwarded to vm_state_notify()
 * (e.g. EXCP_DEBUG).  No-op when the VM is already stopped.
 * NOTE(review): ordering matters — ticks are disabled and vm_running
 * cleared before pausing vCPUs, so cpu_can_run() sees the stop.
 */
static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        /* tell monitor/QMP clients the VM stopped */
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}
110 | 296af7c9 | Blue Swirl | |
111 | 296af7c9 | Blue Swirl | static int cpu_can_run(CPUState *env) |
112 | 296af7c9 | Blue Swirl | { |
113 | 296af7c9 | Blue Swirl | if (env->stop)
|
114 | 296af7c9 | Blue Swirl | return 0; |
115 | 55274a30 | Paolo Bonzini | if (env->stopped || !vm_running)
|
116 | 296af7c9 | Blue Swirl | return 0; |
117 | 296af7c9 | Blue Swirl | return 1; |
118 | 296af7c9 | Blue Swirl | } |
119 | 296af7c9 | Blue Swirl | |
120 | 296af7c9 | Blue Swirl | static int cpu_has_work(CPUState *env) |
121 | 296af7c9 | Blue Swirl | { |
122 | 296af7c9 | Blue Swirl | if (env->stop)
|
123 | 296af7c9 | Blue Swirl | return 1; |
124 | e82bcec2 | Marcelo Tosatti | if (env->queued_work_first)
|
125 | e82bcec2 | Marcelo Tosatti | return 1; |
126 | 55274a30 | Paolo Bonzini | if (env->stopped || !vm_running)
|
127 | 296af7c9 | Blue Swirl | return 0; |
128 | 296af7c9 | Blue Swirl | if (!env->halted)
|
129 | 296af7c9 | Blue Swirl | return 1; |
130 | 296af7c9 | Blue Swirl | if (qemu_cpu_has_work(env))
|
131 | 296af7c9 | Blue Swirl | return 1; |
132 | 296af7c9 | Blue Swirl | return 0; |
133 | 296af7c9 | Blue Swirl | } |
134 | 296af7c9 | Blue Swirl | |
135 | 472fb0c4 | Jan Kiszka | static int any_cpu_has_work(void) |
136 | 296af7c9 | Blue Swirl | { |
137 | 296af7c9 | Blue Swirl | CPUState *env; |
138 | 296af7c9 | Blue Swirl | |
139 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) |
140 | 296af7c9 | Blue Swirl | if (cpu_has_work(env))
|
141 | 296af7c9 | Blue Swirl | return 1; |
142 | 296af7c9 | Blue Swirl | return 0; |
143 | 296af7c9 | Blue Swirl | } |
144 | 296af7c9 | Blue Swirl | |
/*
 * Debug-exception hook: record which CPU hit the breakpoint/watchpoint
 * for the gdbstub, flag the debug request, and stop the VM with
 * EXCP_DEBUG as the reason.
 */
static void cpu_debug_handler(CPUState *env)
{
    gdb_set_stop_cpu(env);
    debug_requested = EXCP_DEBUG;
    vm_stop(EXCP_DEBUG);
}
151 | 3c638d06 | Jan Kiszka | |
152 | 296af7c9 | Blue Swirl | #ifndef _WIN32
|
153 | 296af7c9 | Blue Swirl | static int io_thread_fd = -1; |
154 | 296af7c9 | Blue Swirl | |
155 | 296af7c9 | Blue Swirl | static void qemu_event_increment(void) |
156 | 296af7c9 | Blue Swirl | { |
157 | 296af7c9 | Blue Swirl | /* Write 8 bytes to be compatible with eventfd. */
|
158 | 26a82330 | Blue Swirl | static const uint64_t val = 1; |
159 | 296af7c9 | Blue Swirl | ssize_t ret; |
160 | 296af7c9 | Blue Swirl | |
161 | 296af7c9 | Blue Swirl | if (io_thread_fd == -1) |
162 | 296af7c9 | Blue Swirl | return;
|
163 | 296af7c9 | Blue Swirl | |
164 | 296af7c9 | Blue Swirl | do {
|
165 | 296af7c9 | Blue Swirl | ret = write(io_thread_fd, &val, sizeof(val));
|
166 | 296af7c9 | Blue Swirl | } while (ret < 0 && errno == EINTR); |
167 | 296af7c9 | Blue Swirl | |
168 | 296af7c9 | Blue Swirl | /* EAGAIN is fine, a read must be pending. */
|
169 | 296af7c9 | Blue Swirl | if (ret < 0 && errno != EAGAIN) { |
170 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu_event_increment: write() filed: %s\n",
|
171 | 296af7c9 | Blue Swirl | strerror(errno)); |
172 | 296af7c9 | Blue Swirl | exit (1);
|
173 | 296af7c9 | Blue Swirl | } |
174 | 296af7c9 | Blue Swirl | } |
175 | 296af7c9 | Blue Swirl | |
/*
 * fd-handler that drains the main-loop notification fd.  The fd (passed
 * through the opaque pointer) is expected to be non-blocking; for an
 * eventfd a single 8-byte read empties it, for a pipe we keep reading
 * until a short read indicates the pipe is empty.
 */
static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    char drain[512];
    ssize_t nread;

    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read. */
    for (;;) {
        nread = read(fd, drain, sizeof(drain));
        if (nread == -1 && errno == EINTR) {
            continue;           /* interrupted: retry */
        }
        if (nread != sizeof(drain)) {
            break;              /* short read: pipe is empty */
        }
    }
}
187 | 296af7c9 | Blue Swirl | |
188 | 296af7c9 | Blue Swirl | static int qemu_event_init(void) |
189 | 296af7c9 | Blue Swirl | { |
190 | 296af7c9 | Blue Swirl | int err;
|
191 | 296af7c9 | Blue Swirl | int fds[2]; |
192 | 296af7c9 | Blue Swirl | |
193 | 296af7c9 | Blue Swirl | err = qemu_eventfd(fds); |
194 | 296af7c9 | Blue Swirl | if (err == -1) |
195 | 296af7c9 | Blue Swirl | return -errno;
|
196 | 296af7c9 | Blue Swirl | |
197 | 296af7c9 | Blue Swirl | err = fcntl_setfl(fds[0], O_NONBLOCK);
|
198 | 296af7c9 | Blue Swirl | if (err < 0) |
199 | 296af7c9 | Blue Swirl | goto fail;
|
200 | 296af7c9 | Blue Swirl | |
201 | 296af7c9 | Blue Swirl | err = fcntl_setfl(fds[1], O_NONBLOCK);
|
202 | 296af7c9 | Blue Swirl | if (err < 0) |
203 | 296af7c9 | Blue Swirl | goto fail;
|
204 | 296af7c9 | Blue Swirl | |
205 | 296af7c9 | Blue Swirl | qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL, |
206 | 296af7c9 | Blue Swirl | (void *)(unsigned long)fds[0]); |
207 | 296af7c9 | Blue Swirl | |
208 | 296af7c9 | Blue Swirl | io_thread_fd = fds[1];
|
209 | 296af7c9 | Blue Swirl | return 0; |
210 | 296af7c9 | Blue Swirl | |
211 | 296af7c9 | Blue Swirl | fail:
|
212 | 296af7c9 | Blue Swirl | close(fds[0]);
|
213 | 296af7c9 | Blue Swirl | close(fds[1]);
|
214 | 296af7c9 | Blue Swirl | return err;
|
215 | 296af7c9 | Blue Swirl | } |
216 | 296af7c9 | Blue Swirl | #else
|
217 | 296af7c9 | Blue Swirl | HANDLE qemu_event_handle; |
218 | 296af7c9 | Blue Swirl | |
/* Placeholder callback for qemu_add_wait_object(): waking up on the event
 * is the whole point, nothing needs to be done when it fires. */
static void dummy_event_handler(void *opaque)
{
}
222 | 296af7c9 | Blue Swirl | |
/*
 * Win32 variant: create an auto-reset event object used to wake the main
 * loop and register it as a wait object.  Returns 0 on success, -1 on
 * failure.
 */
static int qemu_event_init(void)
{
    /* auto-reset (bManualReset=FALSE), initially non-signaled */
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}
233 | 296af7c9 | Blue Swirl | |
/* Win32 variant: wake the main loop by signaling the event object.
 * A SetEvent failure is fatal. */
static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit (1);
    }
}
242 | 296af7c9 | Blue Swirl | #endif
|
243 | 296af7c9 | Blue Swirl | |
244 | 296af7c9 | Blue Swirl | #ifndef CONFIG_IOTHREAD
|
/*
 * Non-iothread main-loop setup: install the debug-exception hook and
 * create the wakeup notification channel.  Returns 0 on success or a
 * negative value from qemu_event_init().
 */
int qemu_init_main_loop(void)
{
    cpu_set_debug_excp_handler(cpu_debug_handler);

    return qemu_event_init();
}
251 | 296af7c9 | Blue Swirl | |
/* Non-iothread build has no vCPU threads waiting for machine init,
 * so there is nothing to announce here. */
void qemu_main_loop_start(void)
{
}
255 | 7277e027 | Blue Swirl | |
256 | 296af7c9 | Blue Swirl | void qemu_init_vcpu(void *_env) |
257 | 296af7c9 | Blue Swirl | { |
258 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
259 | 296af7c9 | Blue Swirl | |
260 | 296af7c9 | Blue Swirl | env->nr_cores = smp_cores; |
261 | 296af7c9 | Blue Swirl | env->nr_threads = smp_threads; |
262 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
263 | 296af7c9 | Blue Swirl | kvm_init_vcpu(env); |
264 | 296af7c9 | Blue Swirl | return;
|
265 | 296af7c9 | Blue Swirl | } |
266 | 296af7c9 | Blue Swirl | |
/* Without an iothread everything runs in one thread, so the current
 * thread is trivially "the CPU's thread". Always returns 1. */
int qemu_cpu_self(void *env)
{
    return 1;
}
271 | 296af7c9 | Blue Swirl | |
/* Single-threaded build: execute the work item synchronously — we are
 * already "on" every CPU's thread. */
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}
276 | e82bcec2 | Marcelo Tosatti | |
/* No vCPU threads in the non-iothread build: nothing to resume. */
void resume_all_vcpus(void)
{
}
280 | 296af7c9 | Blue Swirl | |
/* No vCPU threads in the non-iothread build: nothing to pause. */
void pause_all_vcpus(void)
{
}
284 | 296af7c9 | Blue Swirl | |
/* No vCPU threads in the non-iothread build: kicking is a no-op. */
void qemu_cpu_kick(void *env)
{
}
289 | 296af7c9 | Blue Swirl | |
290 | 296af7c9 | Blue Swirl | void qemu_notify_event(void) |
291 | 296af7c9 | Blue Swirl | { |
292 | 296af7c9 | Blue Swirl | CPUState *env = cpu_single_env; |
293 | 296af7c9 | Blue Swirl | |
294 | 296af7c9 | Blue Swirl | qemu_event_increment (); |
295 | 296af7c9 | Blue Swirl | if (env) {
|
296 | 296af7c9 | Blue Swirl | cpu_exit(env); |
297 | 296af7c9 | Blue Swirl | } |
298 | 296af7c9 | Blue Swirl | if (next_cpu && env != next_cpu) {
|
299 | 296af7c9 | Blue Swirl | cpu_exit(next_cpu); |
300 | 296af7c9 | Blue Swirl | } |
301 | 296af7c9 | Blue Swirl | } |
302 | 296af7c9 | Blue Swirl | |
/* Single-threaded build: there is no global iothread lock to take. */
void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}
305 | 296af7c9 | Blue Swirl | |
/* Non-iothread vm_stop: no cross-thread coordination needed, stop the
 * VM directly with the given reason. */
void vm_stop(int reason)
{
    do_vm_stop(reason);
}
310 | 296af7c9 | Blue Swirl | |
311 | 296af7c9 | Blue Swirl | #else /* CONFIG_IOTHREAD */ |
312 | 296af7c9 | Blue Swirl | |
313 | 296af7c9 | Blue Swirl | #include "qemu-thread.h" |
314 | 296af7c9 | Blue Swirl | |
315 | 296af7c9 | Blue Swirl | QemuMutex qemu_global_mutex; |
316 | 296af7c9 | Blue Swirl | static QemuMutex qemu_fair_mutex;
|
317 | 296af7c9 | Blue Swirl | |
318 | 296af7c9 | Blue Swirl | static QemuThread io_thread;
|
319 | 296af7c9 | Blue Swirl | |
320 | 296af7c9 | Blue Swirl | static QemuThread *tcg_cpu_thread;
|
321 | 296af7c9 | Blue Swirl | static QemuCond *tcg_halt_cond;
|
322 | 296af7c9 | Blue Swirl | |
323 | 296af7c9 | Blue Swirl | static int qemu_system_ready; |
324 | 296af7c9 | Blue Swirl | /* cpu creation */
|
325 | 296af7c9 | Blue Swirl | static QemuCond qemu_cpu_cond;
|
326 | 296af7c9 | Blue Swirl | /* system init */
|
327 | 296af7c9 | Blue Swirl | static QemuCond qemu_system_cond;
|
328 | 296af7c9 | Blue Swirl | static QemuCond qemu_pause_cond;
|
329 | e82bcec2 | Marcelo Tosatti | static QemuCond qemu_work_cond;
|
330 | 296af7c9 | Blue Swirl | |
331 | 55541c8a | Paolo Bonzini | static void tcg_init_ipi(void); |
332 | 55541c8a | Paolo Bonzini | static void kvm_init_ipi(CPUState *env); |
333 | a8486bc9 | Marcelo Tosatti | static sigset_t block_io_signals(void); |
334 | a8486bc9 | Marcelo Tosatti | |
335 | a8486bc9 | Marcelo Tosatti | /* If we have signalfd, we mask out the signals we want to handle and then
|
336 | a8486bc9 | Marcelo Tosatti | * use signalfd to listen for them. We rely on whatever the current signal
|
337 | a8486bc9 | Marcelo Tosatti | * handler is to dispatch the signals when we receive them.
|
338 | a8486bc9 | Marcelo Tosatti | */
|
/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long) opaque;            /* the signalfd, via opaque */
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    /* Drain every queued signal; the fd is non-blocking, so EAGAIN
     * terminates the loop. */
    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        /* a partial siginfo record would mean the fd is broken */
        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        /* Look up the currently installed handler for this signal and
         * invoke it manually, preferring the SA_SIGINFO form. */
        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}
369 | a8486bc9 | Marcelo Tosatti | |
370 | a8486bc9 | Marcelo Tosatti | static int qemu_signalfd_init(sigset_t mask) |
371 | a8486bc9 | Marcelo Tosatti | { |
372 | a8486bc9 | Marcelo Tosatti | int sigfd;
|
373 | a8486bc9 | Marcelo Tosatti | |
374 | a8486bc9 | Marcelo Tosatti | sigfd = qemu_signalfd(&mask); |
375 | a8486bc9 | Marcelo Tosatti | if (sigfd == -1) { |
376 | a8486bc9 | Marcelo Tosatti | fprintf(stderr, "failed to create signalfd\n");
|
377 | a8486bc9 | Marcelo Tosatti | return -errno;
|
378 | a8486bc9 | Marcelo Tosatti | } |
379 | a8486bc9 | Marcelo Tosatti | |
380 | a8486bc9 | Marcelo Tosatti | fcntl_setfl(sigfd, O_NONBLOCK); |
381 | a8486bc9 | Marcelo Tosatti | |
382 | a8486bc9 | Marcelo Tosatti | qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL, |
383 | a8486bc9 | Marcelo Tosatti | (void *)(unsigned long) sigfd); |
384 | a8486bc9 | Marcelo Tosatti | |
385 | a8486bc9 | Marcelo Tosatti | return 0; |
386 | a8486bc9 | Marcelo Tosatti | } |
387 | 296af7c9 | Blue Swirl | |
/*
 * Iothread main-loop setup.  Ordering is deliberate:
 *  1. install the debug-exception hook,
 *  2. block the I/O signals in this thread and route them via signalfd,
 *  3. create the eventfd/pipe wakeup channel,
 *  4. init the mutexes/condvars and take the global mutex,
 *  5. record this thread as the I/O thread.
 * Returns 0 on success or a negative value on failure.
 */
int qemu_init_main_loop(void)
{
    int ret;
    sigset_t blocked_signals;

    cpu_set_debug_excp_handler(cpu_debug_handler);

    blocked_signals = block_io_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret)
        return ret;

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    /* the I/O thread runs with the global mutex held */
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_self(&io_thread);

    return 0;
}
416 | 296af7c9 | Blue Swirl | |
/* Announce that machine initialization is complete: vCPU threads block
 * on qemu_system_cond until qemu_system_ready is set here. */
void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}
422 | 7277e027 | Blue Swirl | |
/*
 * Run 'func(data)' on the thread owning 'env' and wait for completion.
 *
 * If we already are that thread, call it directly.  Otherwise queue a
 * stack-allocated work item on the CPU's work list (safe because we
 * block below until wi.done), kick the target thread, and sleep on
 * qemu_work_cond.  The global mutex is held by the caller and protects
 * the queue; it is dropped inside qemu_cond_wait.
 */
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_self(env)) {
        func(data);
        return;
    }

    /* append to the CPU's singly-linked work queue */
    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first)
        env->queued_work_first = &wi;
    else
        env->queued_work_last->next = &wi;
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        /* cpu_single_env may be clobbered while we sleep; restore it */
        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
450 | e82bcec2 | Marcelo Tosatti | |
451 | e82bcec2 | Marcelo Tosatti | static void flush_queued_work(CPUState *env) |
452 | e82bcec2 | Marcelo Tosatti | { |
453 | e82bcec2 | Marcelo Tosatti | struct qemu_work_item *wi;
|
454 | e82bcec2 | Marcelo Tosatti | |
455 | e82bcec2 | Marcelo Tosatti | if (!env->queued_work_first)
|
456 | e82bcec2 | Marcelo Tosatti | return;
|
457 | e82bcec2 | Marcelo Tosatti | |
458 | e82bcec2 | Marcelo Tosatti | while ((wi = env->queued_work_first)) {
|
459 | e82bcec2 | Marcelo Tosatti | env->queued_work_first = wi->next; |
460 | e82bcec2 | Marcelo Tosatti | wi->func(wi->data); |
461 | e82bcec2 | Marcelo Tosatti | wi->done = true;
|
462 | e82bcec2 | Marcelo Tosatti | } |
463 | e82bcec2 | Marcelo Tosatti | env->queued_work_last = NULL;
|
464 | e82bcec2 | Marcelo Tosatti | qemu_cond_broadcast(&qemu_work_cond); |
465 | e82bcec2 | Marcelo Tosatti | } |
466 | e82bcec2 | Marcelo Tosatti | |
/*
 * Per-CPU housekeeping after waking from an I/O-event wait: acknowledge
 * a pending stop request (signalling pause_all_vcpus() waiters) and run
 * any queued run_on_cpu() work.  Caller holds the global mutex.
 */
static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
}
476 | 296af7c9 | Blue Swirl | |
/*
 * TCG thread idle path: sleep (with timeout) until some CPU has work,
 * then briefly release the global mutex so the I/O thread can run, and
 * finally do per-CPU wakeup housekeeping for every CPU (the single TCG
 * thread services all of them).
 */
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (!any_cpu_has_work())
        qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
500 | 296af7c9 | Blue Swirl | |
/*
 * Consume a pending SIG_IPI (vCPU kick) for this thread, waiting at most
 * 'timeout' milliseconds.  The global mutex is dropped around the
 * sigtimedwait() so other threads can make progress; errno is captured
 * before re-locking since qemu_mutex_lock may clobber it.  EAGAIN
 * (timeout) and EINTR are expected; anything else is fatal.
 */
static void qemu_kvm_eat_signal(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;

    /* split ms timeout into seconds + nanoseconds */
    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    qemu_mutex_unlock(&qemu_global_mutex);
    r = sigtimedwait(&waitset, &siginfo, &ts);
    e = errno;
    qemu_mutex_lock(&qemu_global_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
        exit(1);
    }
}
524 | 296af7c9 | Blue Swirl | |
/*
 * KVM vCPU idle path: sleep (with 1s timeout) until this CPU has work,
 * swallow any pending SIG_IPI kick, then handle stop requests and
 * queued work.  Caller holds the global mutex.
 */
static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (!cpu_has_work(env))
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_kvm_eat_signal(env, 0);
    qemu_wait_io_event_common(env);
}
533 | 296af7c9 | Blue Swirl | |
534 | 296af7c9 | Blue Swirl | static int qemu_cpu_exec(CPUState *env); |
535 | 296af7c9 | Blue Swirl | |
/*
 * Per-vCPU thread body for KVM: set up the in-kernel vCPU and its signal
 * mask, announce creation to qemu_init_vcpu's waiter, wait until machine
 * init finishes, then loop forever executing guest code whenever the CPU
 * is runnable.  Runs with the global mutex held except inside the wait
 * primitives.
 */
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_self(env->thread);
    if (kvm_enabled())
        kvm_init_vcpu(env);

    /* route SIG_IPI so it only interrupts KVM_RUN, not this thread */
    kvm_init_ipi(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    return NULL;    /* not reached */
}
563 | 296af7c9 | Blue Swirl | |
/*
 * Single TCG thread body: one thread executes *all* TCG vCPUs.  Installs
 * the SIG_IPI handler, marks every CPU as created (note 'env' is reused
 * as the list iterator here), waits for machine init, then alternates
 * between running all CPUs and the idle/housekeeping path.
 */
static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    tcg_init_ipi();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        cpu_exec_all();
        qemu_tcg_wait_io_event();
    }

    return NULL;    /* not reached */
}
588 | 296af7c9 | Blue Swirl | |
/*
 * Wake a vCPU thread: broadcast its halt condvar (in case it sleeps in a
 * wait-io-event loop) and send SIG_IPI to interrupt it if it is running
 * guest code.
 */
void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;
    qemu_cond_broadcast(env->halt_cond);
    qemu_thread_signal(env->thread, SIG_IPI);
}
595 | 296af7c9 | Blue Swirl | |
596 | 296af7c9 | Blue Swirl | int qemu_cpu_self(void *_env) |
597 | 296af7c9 | Blue Swirl | { |
598 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
599 | 296af7c9 | Blue Swirl | QemuThread this; |
600 | 296af7c9 | Blue Swirl | |
601 | 296af7c9 | Blue Swirl | qemu_thread_self(&this); |
602 | 296af7c9 | Blue Swirl | |
603 | 296af7c9 | Blue Swirl | return qemu_thread_equal(&this, env->thread);
|
604 | 296af7c9 | Blue Swirl | } |
605 | 296af7c9 | Blue Swirl | |
/*
 * SIG_IPI handler for the TCG thread: force the currently executing CPU
 * out of the translation loop and request an exit from cpu_exec_all().
 * Runs in signal context — only async-signal-safe operations here.
 */
static void cpu_signal(int sig)
{
    if (cpu_single_env)
        cpu_exit(cpu_single_env);
    exit_request = 1;
}
612 | 296af7c9 | Blue Swirl | |
/*
 * TCG thread signal setup: install cpu_signal as the SIG_IPI handler,
 * then unblock SIG_IPI in this thread (it is blocked process-wide by
 * block_io_signals).  Handler is installed before unblocking so no
 * signal can arrive unhandled.
 */
static void tcg_init_ipi(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
626 | 296af7c9 | Blue Swirl | |
/* Intentionally empty handler: KVM vcpus only need SIG_IPI delivery to
 * interrupt the kernel's vcpu run loop; the handler itself does nothing. */
static void dummy_signal(int sig)
{
}
630 | 296af7c9 | Blue Swirl | |
/* Per-vcpu SIG_IPI setup for KVM: install a no-op handler so the signal
 * is not fatal, then hand KVM a signal mask (the thread's current mask
 * minus SIG_IPI) so the signal is only deliverable while the vcpu is
 * running inside the kernel.  Exits on failure. */
static void kvm_init_ipi(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    /* Query (SIG_BLOCK with a NULL set leaves the mask unchanged) the
     * thread's current mask so other blocked signals stay blocked. */
    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
        exit(1);
    }
}
649 | 296af7c9 | Blue Swirl | |
/* Configure the calling vcpu thread's signal mask: keep SIGUSR2 deliverable
 * (used by posix-aio-compat.c), but block SIGIO, SIGALRM and SIG_IPI so
 * they are handled by the iothread instead.  Returns the blocked set so
 * the caller can wait on those signals explicitly. */
static sigset_t block_io_signals(void)
{
    sigset_t set;

    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    return set;
}
667 | 296af7c9 | Blue Swirl | |
/* Acquire the global ("big QEMU") lock from the iothread.  The fair mutex
 * prevents the iothread from starving vcpu threads that also contend for
 * the global lock.  For TCG, a failed trylock means the TCG thread holds
 * the lock inside guest code, so it is kicked with SIG_IPI to make it
 * drop out and release the lock before we block on it. */
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            /* TCG thread holds the lock: interrupt it, then wait. */
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}
683 | 296af7c9 | Blue Swirl | |
/* Release the global lock taken by qemu_mutex_lock_iothread(). */
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
688 | 296af7c9 | Blue Swirl | |
689 | 296af7c9 | Blue Swirl | static int all_vcpus_paused(void) |
690 | 296af7c9 | Blue Swirl | { |
691 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
692 | 296af7c9 | Blue Swirl | |
693 | 296af7c9 | Blue Swirl | while (penv) {
|
694 | 296af7c9 | Blue Swirl | if (!penv->stopped)
|
695 | 296af7c9 | Blue Swirl | return 0; |
696 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
697 | 296af7c9 | Blue Swirl | } |
698 | 296af7c9 | Blue Swirl | |
699 | 296af7c9 | Blue Swirl | return 1; |
700 | 296af7c9 | Blue Swirl | } |
701 | 296af7c9 | Blue Swirl | |
702 | 296af7c9 | Blue Swirl | void pause_all_vcpus(void) |
703 | 296af7c9 | Blue Swirl | { |
704 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
705 | 296af7c9 | Blue Swirl | |
706 | 296af7c9 | Blue Swirl | while (penv) {
|
707 | 296af7c9 | Blue Swirl | penv->stop = 1;
|
708 | 296af7c9 | Blue Swirl | qemu_cpu_kick(penv); |
709 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
710 | 296af7c9 | Blue Swirl | } |
711 | 296af7c9 | Blue Swirl | |
712 | 296af7c9 | Blue Swirl | while (!all_vcpus_paused()) {
|
713 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
|
714 | 296af7c9 | Blue Swirl | penv = first_cpu; |
715 | 296af7c9 | Blue Swirl | while (penv) {
|
716 | 1fbb22e5 | Marcelo Tosatti | qemu_cpu_kick(penv); |
717 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
718 | 296af7c9 | Blue Swirl | } |
719 | 296af7c9 | Blue Swirl | } |
720 | 296af7c9 | Blue Swirl | } |
721 | 296af7c9 | Blue Swirl | |
722 | 296af7c9 | Blue Swirl | void resume_all_vcpus(void) |
723 | 296af7c9 | Blue Swirl | { |
724 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
725 | 296af7c9 | Blue Swirl | |
726 | 296af7c9 | Blue Swirl | while (penv) {
|
727 | 296af7c9 | Blue Swirl | penv->stop = 0;
|
728 | 296af7c9 | Blue Swirl | penv->stopped = 0;
|
729 | 296af7c9 | Blue Swirl | qemu_cpu_kick(penv); |
730 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
731 | 296af7c9 | Blue Swirl | } |
732 | 296af7c9 | Blue Swirl | } |
733 | 296af7c9 | Blue Swirl | |
734 | 296af7c9 | Blue Swirl | static void tcg_init_vcpu(void *_env) |
735 | 296af7c9 | Blue Swirl | { |
736 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
737 | 296af7c9 | Blue Swirl | /* share a single thread for all cpus with TCG */
|
738 | 296af7c9 | Blue Swirl | if (!tcg_cpu_thread) {
|
739 | 296af7c9 | Blue Swirl | env->thread = qemu_mallocz(sizeof(QemuThread));
|
740 | 296af7c9 | Blue Swirl | env->halt_cond = qemu_mallocz(sizeof(QemuCond));
|
741 | 296af7c9 | Blue Swirl | qemu_cond_init(env->halt_cond); |
742 | 296af7c9 | Blue Swirl | qemu_thread_create(env->thread, tcg_cpu_thread_fn, env); |
743 | 296af7c9 | Blue Swirl | while (env->created == 0) |
744 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
|
745 | 296af7c9 | Blue Swirl | tcg_cpu_thread = env->thread; |
746 | 296af7c9 | Blue Swirl | tcg_halt_cond = env->halt_cond; |
747 | 296af7c9 | Blue Swirl | } else {
|
748 | 296af7c9 | Blue Swirl | env->thread = tcg_cpu_thread; |
749 | 296af7c9 | Blue Swirl | env->halt_cond = tcg_halt_cond; |
750 | 296af7c9 | Blue Swirl | } |
751 | 296af7c9 | Blue Swirl | } |
752 | 296af7c9 | Blue Swirl | |
753 | 296af7c9 | Blue Swirl | static void kvm_start_vcpu(CPUState *env) |
754 | 296af7c9 | Blue Swirl | { |
755 | 296af7c9 | Blue Swirl | env->thread = qemu_mallocz(sizeof(QemuThread));
|
756 | 296af7c9 | Blue Swirl | env->halt_cond = qemu_mallocz(sizeof(QemuCond));
|
757 | 296af7c9 | Blue Swirl | qemu_cond_init(env->halt_cond); |
758 | 296af7c9 | Blue Swirl | qemu_thread_create(env->thread, kvm_cpu_thread_fn, env); |
759 | 296af7c9 | Blue Swirl | while (env->created == 0) |
760 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
|
761 | 296af7c9 | Blue Swirl | } |
762 | 296af7c9 | Blue Swirl | |
763 | 296af7c9 | Blue Swirl | void qemu_init_vcpu(void *_env) |
764 | 296af7c9 | Blue Swirl | { |
765 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
766 | 296af7c9 | Blue Swirl | |
767 | 296af7c9 | Blue Swirl | env->nr_cores = smp_cores; |
768 | 296af7c9 | Blue Swirl | env->nr_threads = smp_threads; |
769 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
770 | 296af7c9 | Blue Swirl | kvm_start_vcpu(env); |
771 | 296af7c9 | Blue Swirl | else
|
772 | 296af7c9 | Blue Swirl | tcg_init_vcpu(env); |
773 | 296af7c9 | Blue Swirl | } |
774 | 296af7c9 | Blue Swirl | |
/* Wake up the iothread's main loop by incrementing its event notifier. */
void qemu_notify_event(void)
{
    qemu_event_increment();
}
779 | 296af7c9 | Blue Swirl | |
/* Record a pending vm-stop reason and wake the iothread so it can act
 * on the request from its main loop. */
static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}
785 | 296af7c9 | Blue Swirl | |
/* Stop the VM for @reason.  If called from the iothread, stop directly;
 * if called from a vcpu thread, post a request for the iothread and force
 * the current cpu out of its execution loop instead. */
void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        if (cpu_single_env) {
            cpu_exit(cpu_single_env);
            cpu_single_env->stop = 1;
        }
        return;
    }
    do_vm_stop(reason);
}
805 | 296af7c9 | Blue Swirl | |
806 | 296af7c9 | Blue Swirl | #endif
|
807 | 296af7c9 | Blue Swirl | |
/* Run guest code on @env via cpu_exec(), with icount bookkeeping around
 * the call when instruction counting is enabled.  Returns cpu_exec()'s
 * exit reason. */
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        /* Fold any not-yet-executed budget back into qemu_icount before
         * computing a fresh budget up to the next timer deadline. */
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round (qemu_next_deadline());
        qemu_icount += count;
        /* icount_decr.u16.low only holds 16 bits; overflow goes into
         * icount_extra and is consumed in later chunks. */
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
845 | 296af7c9 | Blue Swirl | |
/* Round-robin one scheduling pass over all cpus, resuming from the
 * persistent cursor next_cpu.  Stops early on a pending alarm, an
 * exit_request, a debug exception, or a per-cpu stop request.  Returns
 * whether any cpu still has work to do. */
bool cpu_exec_all(void)
{
    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        /* Suspend vm_clock while single-stepping with NOTIMER set. */
        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env)) {
            if (qemu_cpu_exec(env) == EXCP_DEBUG) {
                break;
            }
        } else if (env->stop) {
            break;
        }
    }
    exit_request = 0;
    return any_cpu_has_work();
}
869 | 296af7c9 | Blue Swirl | |
870 | 296af7c9 | Blue Swirl | void set_numa_modes(void) |
871 | 296af7c9 | Blue Swirl | { |
872 | 296af7c9 | Blue Swirl | CPUState *env; |
873 | 296af7c9 | Blue Swirl | int i;
|
874 | 296af7c9 | Blue Swirl | |
875 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) { |
876 | 296af7c9 | Blue Swirl | for (i = 0; i < nb_numa_nodes; i++) { |
877 | 296af7c9 | Blue Swirl | if (node_cpumask[i] & (1 << env->cpu_index)) { |
878 | 296af7c9 | Blue Swirl | env->numa_node = i; |
879 | 296af7c9 | Blue Swirl | } |
880 | 296af7c9 | Blue Swirl | } |
881 | 296af7c9 | Blue Swirl | } |
882 | 296af7c9 | Blue Swirl | } |
883 | 296af7c9 | Blue Swirl | |
884 | 296af7c9 | Blue Swirl | void set_cpu_log(const char *optarg) |
885 | 296af7c9 | Blue Swirl | { |
886 | 296af7c9 | Blue Swirl | int mask;
|
887 | 296af7c9 | Blue Swirl | const CPULogItem *item;
|
888 | 296af7c9 | Blue Swirl | |
889 | 296af7c9 | Blue Swirl | mask = cpu_str_to_log_mask(optarg); |
890 | 296af7c9 | Blue Swirl | if (!mask) {
|
891 | 296af7c9 | Blue Swirl | printf("Log items (comma separated):\n");
|
892 | 296af7c9 | Blue Swirl | for (item = cpu_log_items; item->mask != 0; item++) { |
893 | 296af7c9 | Blue Swirl | printf("%-10s %s\n", item->name, item->help);
|
894 | 296af7c9 | Blue Swirl | } |
895 | 296af7c9 | Blue Swirl | exit(1);
|
896 | 296af7c9 | Blue Swirl | } |
897 | 296af7c9 | Blue Swirl | cpu_set_log(mask); |
898 | 296af7c9 | Blue Swirl | } |
899 | 29e922b6 | Blue Swirl | |
900 | 29e922b6 | Blue Swirl | /* Return the virtual CPU time, based on the instruction counter. */
|
901 | 29e922b6 | Blue Swirl | int64_t cpu_get_icount(void)
|
902 | 29e922b6 | Blue Swirl | { |
903 | 29e922b6 | Blue Swirl | int64_t icount; |
904 | 29e922b6 | Blue Swirl | CPUState *env = cpu_single_env;; |
905 | 29e922b6 | Blue Swirl | |
906 | 29e922b6 | Blue Swirl | icount = qemu_icount; |
907 | 29e922b6 | Blue Swirl | if (env) {
|
908 | 29e922b6 | Blue Swirl | if (!can_do_io(env)) {
|
909 | 29e922b6 | Blue Swirl | fprintf(stderr, "Bad clock read\n");
|
910 | 29e922b6 | Blue Swirl | } |
911 | 29e922b6 | Blue Swirl | icount -= (env->icount_decr.u16.low + env->icount_extra); |
912 | 29e922b6 | Blue Swirl | } |
913 | 29e922b6 | Blue Swirl | return qemu_icount_bias + (icount << icount_time_shift);
|
914 | 29e922b6 | Blue Swirl | } |
915 | 262353cb | Blue Swirl | |
/* Print the list of available cpu models to @f via @cpu_fprintf, using
 * whichever listing hook the current target defines.  Targets with
 * neither hook produce no output. */
void list_cpus(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
               const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf);  /* deprecated */
#endif
}