root / cpus.c @ c821c2bd
History | View | Annotate | Download (19 kB)
1 | 296af7c9 | Blue Swirl | /*
|
---|---|---|---|
2 | 296af7c9 | Blue Swirl | * QEMU System Emulator
|
3 | 296af7c9 | Blue Swirl | *
|
4 | 296af7c9 | Blue Swirl | * Copyright (c) 2003-2008 Fabrice Bellard
|
5 | 296af7c9 | Blue Swirl | *
|
6 | 296af7c9 | Blue Swirl | * Permission is hereby granted, free of charge, to any person obtaining a copy
|
7 | 296af7c9 | Blue Swirl | * of this software and associated documentation files (the "Software"), to deal
|
8 | 296af7c9 | Blue Swirl | * in the Software without restriction, including without limitation the rights
|
9 | 296af7c9 | Blue Swirl | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 | 296af7c9 | Blue Swirl | * copies of the Software, and to permit persons to whom the Software is
|
11 | 296af7c9 | Blue Swirl | * furnished to do so, subject to the following conditions:
|
12 | 296af7c9 | Blue Swirl | *
|
13 | 296af7c9 | Blue Swirl | * The above copyright notice and this permission notice shall be included in
|
14 | 296af7c9 | Blue Swirl | * all copies or substantial portions of the Software.
|
15 | 296af7c9 | Blue Swirl | *
|
16 | 296af7c9 | Blue Swirl | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
17 | 296af7c9 | Blue Swirl | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
18 | 296af7c9 | Blue Swirl | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
19 | 296af7c9 | Blue Swirl | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
20 | 296af7c9 | Blue Swirl | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
21 | 296af7c9 | Blue Swirl | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
22 | 296af7c9 | Blue Swirl | * THE SOFTWARE.
|
23 | 296af7c9 | Blue Swirl | */
|
24 | 296af7c9 | Blue Swirl | |
25 | 296af7c9 | Blue Swirl | /* Needed early for CONFIG_BSD etc. */
|
26 | 296af7c9 | Blue Swirl | #include "config-host.h" |
27 | 296af7c9 | Blue Swirl | |
28 | 296af7c9 | Blue Swirl | #include "monitor.h" |
29 | 296af7c9 | Blue Swirl | #include "sysemu.h" |
30 | 296af7c9 | Blue Swirl | #include "gdbstub.h" |
31 | 296af7c9 | Blue Swirl | #include "dma.h" |
32 | 296af7c9 | Blue Swirl | #include "kvm.h" |
33 | 296af7c9 | Blue Swirl | |
34 | 296af7c9 | Blue Swirl | #include "cpus.h" |
35 | 296af7c9 | Blue Swirl | |
36 | 7277e027 | Blue Swirl | #ifdef SIGRTMIN
|
37 | 7277e027 | Blue Swirl | #define SIG_IPI (SIGRTMIN+4) |
38 | 7277e027 | Blue Swirl | #else
|
39 | 7277e027 | Blue Swirl | #define SIG_IPI SIGUSR1
|
40 | 7277e027 | Blue Swirl | #endif
|
41 | 7277e027 | Blue Swirl | |
42 | 296af7c9 | Blue Swirl | static CPUState *cur_cpu;
|
43 | 296af7c9 | Blue Swirl | static CPUState *next_cpu;
|
44 | 296af7c9 | Blue Swirl | |
45 | 296af7c9 | Blue Swirl | /***********************************************************/
|
46 | 296af7c9 | Blue Swirl | void hw_error(const char *fmt, ...) |
47 | 296af7c9 | Blue Swirl | { |
48 | 296af7c9 | Blue Swirl | va_list ap; |
49 | 296af7c9 | Blue Swirl | CPUState *env; |
50 | 296af7c9 | Blue Swirl | |
51 | 296af7c9 | Blue Swirl | va_start(ap, fmt); |
52 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu: hardware error: ");
|
53 | 296af7c9 | Blue Swirl | vfprintf(stderr, fmt, ap); |
54 | 296af7c9 | Blue Swirl | fprintf(stderr, "\n");
|
55 | 296af7c9 | Blue Swirl | for(env = first_cpu; env != NULL; env = env->next_cpu) { |
56 | 296af7c9 | Blue Swirl | fprintf(stderr, "CPU #%d:\n", env->cpu_index);
|
57 | 296af7c9 | Blue Swirl | #ifdef TARGET_I386
|
58 | 296af7c9 | Blue Swirl | cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU); |
59 | 296af7c9 | Blue Swirl | #else
|
60 | 296af7c9 | Blue Swirl | cpu_dump_state(env, stderr, fprintf, 0);
|
61 | 296af7c9 | Blue Swirl | #endif
|
62 | 296af7c9 | Blue Swirl | } |
63 | 296af7c9 | Blue Swirl | va_end(ap); |
64 | 296af7c9 | Blue Swirl | abort(); |
65 | 296af7c9 | Blue Swirl | } |
66 | 296af7c9 | Blue Swirl | |
67 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_states(void) |
68 | 296af7c9 | Blue Swirl | { |
69 | 296af7c9 | Blue Swirl | CPUState *cpu; |
70 | 296af7c9 | Blue Swirl | |
71 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
72 | 296af7c9 | Blue Swirl | cpu_synchronize_state(cpu); |
73 | 296af7c9 | Blue Swirl | } |
74 | 296af7c9 | Blue Swirl | } |
75 | 296af7c9 | Blue Swirl | |
76 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_post_reset(void) |
77 | 296af7c9 | Blue Swirl | { |
78 | 296af7c9 | Blue Swirl | CPUState *cpu; |
79 | 296af7c9 | Blue Swirl | |
80 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
81 | 296af7c9 | Blue Swirl | cpu_synchronize_post_reset(cpu); |
82 | 296af7c9 | Blue Swirl | } |
83 | 296af7c9 | Blue Swirl | } |
84 | 296af7c9 | Blue Swirl | |
85 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_post_init(void) |
86 | 296af7c9 | Blue Swirl | { |
87 | 296af7c9 | Blue Swirl | CPUState *cpu; |
88 | 296af7c9 | Blue Swirl | |
89 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
90 | 296af7c9 | Blue Swirl | cpu_synchronize_post_init(cpu); |
91 | 296af7c9 | Blue Swirl | } |
92 | 296af7c9 | Blue Swirl | } |
93 | 296af7c9 | Blue Swirl | |
94 | 3ae9501c | Marcelo Tosatti | int cpu_is_stopped(CPUState *env)
|
95 | 3ae9501c | Marcelo Tosatti | { |
96 | 3ae9501c | Marcelo Tosatti | return !vm_running || env->stopped;
|
97 | 3ae9501c | Marcelo Tosatti | } |
98 | 3ae9501c | Marcelo Tosatti | |
99 | 296af7c9 | Blue Swirl | static void do_vm_stop(int reason) |
100 | 296af7c9 | Blue Swirl | { |
101 | 296af7c9 | Blue Swirl | if (vm_running) {
|
102 | 296af7c9 | Blue Swirl | cpu_disable_ticks(); |
103 | 296af7c9 | Blue Swirl | vm_running = 0;
|
104 | 296af7c9 | Blue Swirl | pause_all_vcpus(); |
105 | 296af7c9 | Blue Swirl | vm_state_notify(0, reason);
|
106 | 296af7c9 | Blue Swirl | monitor_protocol_event(QEVENT_STOP, NULL);
|
107 | 296af7c9 | Blue Swirl | } |
108 | 296af7c9 | Blue Swirl | } |
109 | 296af7c9 | Blue Swirl | |
110 | 296af7c9 | Blue Swirl | static int cpu_can_run(CPUState *env) |
111 | 296af7c9 | Blue Swirl | { |
112 | 296af7c9 | Blue Swirl | if (env->stop)
|
113 | 296af7c9 | Blue Swirl | return 0; |
114 | 55274a30 | Paolo Bonzini | if (env->stopped || !vm_running)
|
115 | 296af7c9 | Blue Swirl | return 0; |
116 | 296af7c9 | Blue Swirl | return 1; |
117 | 296af7c9 | Blue Swirl | } |
118 | 296af7c9 | Blue Swirl | |
119 | 296af7c9 | Blue Swirl | static int cpu_has_work(CPUState *env) |
120 | 296af7c9 | Blue Swirl | { |
121 | 296af7c9 | Blue Swirl | if (env->stop)
|
122 | 296af7c9 | Blue Swirl | return 1; |
123 | e82bcec2 | Marcelo Tosatti | if (env->queued_work_first)
|
124 | e82bcec2 | Marcelo Tosatti | return 1; |
125 | 55274a30 | Paolo Bonzini | if (env->stopped || !vm_running)
|
126 | 296af7c9 | Blue Swirl | return 0; |
127 | 296af7c9 | Blue Swirl | if (!env->halted)
|
128 | 296af7c9 | Blue Swirl | return 1; |
129 | 296af7c9 | Blue Swirl | if (qemu_cpu_has_work(env))
|
130 | 296af7c9 | Blue Swirl | return 1; |
131 | 296af7c9 | Blue Swirl | return 0; |
132 | 296af7c9 | Blue Swirl | } |
133 | 296af7c9 | Blue Swirl | |
134 | 296af7c9 | Blue Swirl | static int tcg_has_work(void) |
135 | 296af7c9 | Blue Swirl | { |
136 | 296af7c9 | Blue Swirl | CPUState *env; |
137 | 296af7c9 | Blue Swirl | |
138 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) |
139 | 296af7c9 | Blue Swirl | if (cpu_has_work(env))
|
140 | 296af7c9 | Blue Swirl | return 1; |
141 | 296af7c9 | Blue Swirl | return 0; |
142 | 296af7c9 | Blue Swirl | } |
143 | 296af7c9 | Blue Swirl | |
144 | 296af7c9 | Blue Swirl | #ifndef _WIN32
|
145 | 296af7c9 | Blue Swirl | static int io_thread_fd = -1; |
146 | 296af7c9 | Blue Swirl | |
147 | 296af7c9 | Blue Swirl | static void qemu_event_increment(void) |
148 | 296af7c9 | Blue Swirl | { |
149 | 296af7c9 | Blue Swirl | /* Write 8 bytes to be compatible with eventfd. */
|
150 | 26a82330 | Blue Swirl | static const uint64_t val = 1; |
151 | 296af7c9 | Blue Swirl | ssize_t ret; |
152 | 296af7c9 | Blue Swirl | |
153 | 296af7c9 | Blue Swirl | if (io_thread_fd == -1) |
154 | 296af7c9 | Blue Swirl | return;
|
155 | 296af7c9 | Blue Swirl | |
156 | 296af7c9 | Blue Swirl | do {
|
157 | 296af7c9 | Blue Swirl | ret = write(io_thread_fd, &val, sizeof(val));
|
158 | 296af7c9 | Blue Swirl | } while (ret < 0 && errno == EINTR); |
159 | 296af7c9 | Blue Swirl | |
160 | 296af7c9 | Blue Swirl | /* EAGAIN is fine, a read must be pending. */
|
161 | 296af7c9 | Blue Swirl | if (ret < 0 && errno != EAGAIN) { |
162 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu_event_increment: write() filed: %s\n",
|
163 | 296af7c9 | Blue Swirl | strerror(errno)); |
164 | 296af7c9 | Blue Swirl | exit (1);
|
165 | 296af7c9 | Blue Swirl | } |
166 | 296af7c9 | Blue Swirl | } |
167 | 296af7c9 | Blue Swirl | |
/*
 * fd handler: drain the notify pipe.  For eventfd, only 8 bytes will
 * be read; for a pipe, keep reading while the buffer comes back full.
 */
static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    char scratch[512];
    ssize_t nread;

    for (;;) {
        nread = read(fd, scratch, sizeof(scratch));
        if (nread == -1 && errno == EINTR) {
            continue;   /* interrupted, retry */
        }
        if (nread == sizeof(scratch)) {
            continue;   /* there may be more to drain */
        }
        break;
    }
}
179 | 296af7c9 | Blue Swirl | |
180 | 296af7c9 | Blue Swirl | static int qemu_event_init(void) |
181 | 296af7c9 | Blue Swirl | { |
182 | 296af7c9 | Blue Swirl | int err;
|
183 | 296af7c9 | Blue Swirl | int fds[2]; |
184 | 296af7c9 | Blue Swirl | |
185 | 296af7c9 | Blue Swirl | err = qemu_eventfd(fds); |
186 | 296af7c9 | Blue Swirl | if (err == -1) |
187 | 296af7c9 | Blue Swirl | return -errno;
|
188 | 296af7c9 | Blue Swirl | |
189 | 296af7c9 | Blue Swirl | err = fcntl_setfl(fds[0], O_NONBLOCK);
|
190 | 296af7c9 | Blue Swirl | if (err < 0) |
191 | 296af7c9 | Blue Swirl | goto fail;
|
192 | 296af7c9 | Blue Swirl | |
193 | 296af7c9 | Blue Swirl | err = fcntl_setfl(fds[1], O_NONBLOCK);
|
194 | 296af7c9 | Blue Swirl | if (err < 0) |
195 | 296af7c9 | Blue Swirl | goto fail;
|
196 | 296af7c9 | Blue Swirl | |
197 | 296af7c9 | Blue Swirl | qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL, |
198 | 296af7c9 | Blue Swirl | (void *)(unsigned long)fds[0]); |
199 | 296af7c9 | Blue Swirl | |
200 | 296af7c9 | Blue Swirl | io_thread_fd = fds[1];
|
201 | 296af7c9 | Blue Swirl | return 0; |
202 | 296af7c9 | Blue Swirl | |
203 | 296af7c9 | Blue Swirl | fail:
|
204 | 296af7c9 | Blue Swirl | close(fds[0]);
|
205 | 296af7c9 | Blue Swirl | close(fds[1]);
|
206 | 296af7c9 | Blue Swirl | return err;
|
207 | 296af7c9 | Blue Swirl | } |
208 | 296af7c9 | Blue Swirl | #else
|
209 | 296af7c9 | Blue Swirl | HANDLE qemu_event_handle; |
210 | 296af7c9 | Blue Swirl | |
/* No-op callback registered with qemu_add_wait_object() below. */
static void dummy_event_handler(void *opaque)
{
}
214 | 296af7c9 | Blue Swirl | |
215 | 296af7c9 | Blue Swirl | static int qemu_event_init(void) |
216 | 296af7c9 | Blue Swirl | { |
217 | 296af7c9 | Blue Swirl | qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL); |
218 | 296af7c9 | Blue Swirl | if (!qemu_event_handle) {
|
219 | 296af7c9 | Blue Swirl | fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
|
220 | 296af7c9 | Blue Swirl | return -1; |
221 | 296af7c9 | Blue Swirl | } |
222 | 296af7c9 | Blue Swirl | qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
|
223 | 296af7c9 | Blue Swirl | return 0; |
224 | 296af7c9 | Blue Swirl | } |
225 | 296af7c9 | Blue Swirl | |
226 | 296af7c9 | Blue Swirl | static void qemu_event_increment(void) |
227 | 296af7c9 | Blue Swirl | { |
228 | 296af7c9 | Blue Swirl | if (!SetEvent(qemu_event_handle)) {
|
229 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
|
230 | 296af7c9 | Blue Swirl | GetLastError()); |
231 | 296af7c9 | Blue Swirl | exit (1);
|
232 | 296af7c9 | Blue Swirl | } |
233 | 296af7c9 | Blue Swirl | } |
234 | 296af7c9 | Blue Swirl | #endif
|
235 | 296af7c9 | Blue Swirl | |
236 | 296af7c9 | Blue Swirl | #ifndef CONFIG_IOTHREAD
|
/* Without CONFIG_IOTHREAD, main-loop setup is just the notify event. */
int qemu_init_main_loop(void)
{
    return qemu_event_init();
}
241 | 296af7c9 | Blue Swirl | |
/* Nothing to announce in single-threaded mode. */
void qemu_main_loop_start(void)
{
}
245 | 7277e027 | Blue Swirl | |
246 | 296af7c9 | Blue Swirl | void qemu_init_vcpu(void *_env) |
247 | 296af7c9 | Blue Swirl | { |
248 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
249 | 296af7c9 | Blue Swirl | |
250 | 296af7c9 | Blue Swirl | env->nr_cores = smp_cores; |
251 | 296af7c9 | Blue Swirl | env->nr_threads = smp_threads; |
252 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
253 | 296af7c9 | Blue Swirl | kvm_init_vcpu(env); |
254 | 296af7c9 | Blue Swirl | return;
|
255 | 296af7c9 | Blue Swirl | } |
256 | 296af7c9 | Blue Swirl | |
/* Single-threaded: the caller is always the CPU "thread". */
int qemu_cpu_self(void *env)
{
    return 1;
}
261 | 296af7c9 | Blue Swirl | |
262 | e82bcec2 | Marcelo Tosatti | void run_on_cpu(CPUState *env, void (*func)(void *data), void *data) |
263 | e82bcec2 | Marcelo Tosatti | { |
264 | e82bcec2 | Marcelo Tosatti | func(data); |
265 | e82bcec2 | Marcelo Tosatti | } |
266 | e82bcec2 | Marcelo Tosatti | |
/* vCPUs run inline with the main loop; nothing to resume. */
void resume_all_vcpus(void)
{
}
270 | 296af7c9 | Blue Swirl | |
/* vCPUs run inline with the main loop; nothing to pause. */
void pause_all_vcpus(void)
{
}
274 | 296af7c9 | Blue Swirl | |
/*
 * No vCPU threads to kick in single-threaded mode.
 * (Dropped the redundant lone "return;" statement.)
 */
void qemu_cpu_kick(void *env)
{
}
279 | 296af7c9 | Blue Swirl | |
280 | 296af7c9 | Blue Swirl | void qemu_notify_event(void) |
281 | 296af7c9 | Blue Swirl | { |
282 | 296af7c9 | Blue Swirl | CPUState *env = cpu_single_env; |
283 | 296af7c9 | Blue Swirl | |
284 | 296af7c9 | Blue Swirl | qemu_event_increment (); |
285 | 296af7c9 | Blue Swirl | if (env) {
|
286 | 296af7c9 | Blue Swirl | cpu_exit(env); |
287 | 296af7c9 | Blue Swirl | } |
288 | 296af7c9 | Blue Swirl | if (next_cpu && env != next_cpu) {
|
289 | 296af7c9 | Blue Swirl | cpu_exit(next_cpu); |
290 | 296af7c9 | Blue Swirl | } |
291 | 296af7c9 | Blue Swirl | } |
292 | 296af7c9 | Blue Swirl | |
/* Without CONFIG_IOTHREAD there is no iothread lock to take. */
void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}
295 | 296af7c9 | Blue Swirl | |
/* Single-threaded: stop the VM directly, no cross-thread signalling. */
void vm_stop(int reason)
{
    do_vm_stop(reason);
}
300 | 296af7c9 | Blue Swirl | |
301 | 296af7c9 | Blue Swirl | #else /* CONFIG_IOTHREAD */ |
302 | 296af7c9 | Blue Swirl | |
303 | 296af7c9 | Blue Swirl | #include "qemu-thread.h" |
304 | 296af7c9 | Blue Swirl | |
305 | 296af7c9 | Blue Swirl | QemuMutex qemu_global_mutex; |
306 | 296af7c9 | Blue Swirl | static QemuMutex qemu_fair_mutex;
|
307 | 296af7c9 | Blue Swirl | |
308 | 296af7c9 | Blue Swirl | static QemuThread io_thread;
|
309 | 296af7c9 | Blue Swirl | |
310 | 296af7c9 | Blue Swirl | static QemuThread *tcg_cpu_thread;
|
311 | 296af7c9 | Blue Swirl | static QemuCond *tcg_halt_cond;
|
312 | 296af7c9 | Blue Swirl | |
313 | 296af7c9 | Blue Swirl | static int qemu_system_ready; |
314 | 296af7c9 | Blue Swirl | /* cpu creation */
|
315 | 296af7c9 | Blue Swirl | static QemuCond qemu_cpu_cond;
|
316 | 296af7c9 | Blue Swirl | /* system init */
|
317 | 296af7c9 | Blue Swirl | static QemuCond qemu_system_cond;
|
318 | 296af7c9 | Blue Swirl | static QemuCond qemu_pause_cond;
|
319 | e82bcec2 | Marcelo Tosatti | static QemuCond qemu_work_cond;
|
320 | 296af7c9 | Blue Swirl | |
321 | 296af7c9 | Blue Swirl | static void tcg_block_io_signals(void); |
322 | 296af7c9 | Blue Swirl | static void kvm_block_io_signals(CPUState *env); |
323 | 296af7c9 | Blue Swirl | static void unblock_io_signals(void); |
324 | 296af7c9 | Blue Swirl | |
325 | 296af7c9 | Blue Swirl | int qemu_init_main_loop(void) |
326 | 296af7c9 | Blue Swirl | { |
327 | 296af7c9 | Blue Swirl | int ret;
|
328 | 296af7c9 | Blue Swirl | |
329 | 296af7c9 | Blue Swirl | ret = qemu_event_init(); |
330 | 296af7c9 | Blue Swirl | if (ret)
|
331 | 296af7c9 | Blue Swirl | return ret;
|
332 | 296af7c9 | Blue Swirl | |
333 | 296af7c9 | Blue Swirl | qemu_cond_init(&qemu_pause_cond); |
334 | 296af7c9 | Blue Swirl | qemu_mutex_init(&qemu_fair_mutex); |
335 | 296af7c9 | Blue Swirl | qemu_mutex_init(&qemu_global_mutex); |
336 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
337 | 296af7c9 | Blue Swirl | |
338 | 296af7c9 | Blue Swirl | unblock_io_signals(); |
339 | 296af7c9 | Blue Swirl | qemu_thread_self(&io_thread); |
340 | 296af7c9 | Blue Swirl | |
341 | 296af7c9 | Blue Swirl | return 0; |
342 | 296af7c9 | Blue Swirl | } |
343 | 296af7c9 | Blue Swirl | |
344 | 7277e027 | Blue Swirl | void qemu_main_loop_start(void) |
345 | 7277e027 | Blue Swirl | { |
346 | 7277e027 | Blue Swirl | qemu_system_ready = 1;
|
347 | 7277e027 | Blue Swirl | qemu_cond_broadcast(&qemu_system_cond); |
348 | 7277e027 | Blue Swirl | } |
349 | 7277e027 | Blue Swirl | |
350 | e82bcec2 | Marcelo Tosatti | void run_on_cpu(CPUState *env, void (*func)(void *data), void *data) |
351 | e82bcec2 | Marcelo Tosatti | { |
352 | e82bcec2 | Marcelo Tosatti | struct qemu_work_item wi;
|
353 | e82bcec2 | Marcelo Tosatti | |
354 | e82bcec2 | Marcelo Tosatti | if (qemu_cpu_self(env)) {
|
355 | e82bcec2 | Marcelo Tosatti | func(data); |
356 | e82bcec2 | Marcelo Tosatti | return;
|
357 | e82bcec2 | Marcelo Tosatti | } |
358 | e82bcec2 | Marcelo Tosatti | |
359 | e82bcec2 | Marcelo Tosatti | wi.func = func; |
360 | e82bcec2 | Marcelo Tosatti | wi.data = data; |
361 | e82bcec2 | Marcelo Tosatti | if (!env->queued_work_first)
|
362 | e82bcec2 | Marcelo Tosatti | env->queued_work_first = &wi; |
363 | e82bcec2 | Marcelo Tosatti | else
|
364 | e82bcec2 | Marcelo Tosatti | env->queued_work_last->next = &wi; |
365 | e82bcec2 | Marcelo Tosatti | env->queued_work_last = &wi; |
366 | e82bcec2 | Marcelo Tosatti | wi.next = NULL;
|
367 | e82bcec2 | Marcelo Tosatti | wi.done = false;
|
368 | e82bcec2 | Marcelo Tosatti | |
369 | e82bcec2 | Marcelo Tosatti | qemu_cpu_kick(env); |
370 | e82bcec2 | Marcelo Tosatti | while (!wi.done) {
|
371 | e82bcec2 | Marcelo Tosatti | CPUState *self_env = cpu_single_env; |
372 | e82bcec2 | Marcelo Tosatti | |
373 | e82bcec2 | Marcelo Tosatti | qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex); |
374 | e82bcec2 | Marcelo Tosatti | cpu_single_env = self_env; |
375 | e82bcec2 | Marcelo Tosatti | } |
376 | e82bcec2 | Marcelo Tosatti | } |
377 | e82bcec2 | Marcelo Tosatti | |
378 | e82bcec2 | Marcelo Tosatti | static void flush_queued_work(CPUState *env) |
379 | e82bcec2 | Marcelo Tosatti | { |
380 | e82bcec2 | Marcelo Tosatti | struct qemu_work_item *wi;
|
381 | e82bcec2 | Marcelo Tosatti | |
382 | e82bcec2 | Marcelo Tosatti | if (!env->queued_work_first)
|
383 | e82bcec2 | Marcelo Tosatti | return;
|
384 | e82bcec2 | Marcelo Tosatti | |
385 | e82bcec2 | Marcelo Tosatti | while ((wi = env->queued_work_first)) {
|
386 | e82bcec2 | Marcelo Tosatti | env->queued_work_first = wi->next; |
387 | e82bcec2 | Marcelo Tosatti | wi->func(wi->data); |
388 | e82bcec2 | Marcelo Tosatti | wi->done = true;
|
389 | e82bcec2 | Marcelo Tosatti | } |
390 | e82bcec2 | Marcelo Tosatti | env->queued_work_last = NULL;
|
391 | e82bcec2 | Marcelo Tosatti | qemu_cond_broadcast(&qemu_work_cond); |
392 | e82bcec2 | Marcelo Tosatti | } |
393 | e82bcec2 | Marcelo Tosatti | |
394 | 296af7c9 | Blue Swirl | static void qemu_wait_io_event_common(CPUState *env) |
395 | 296af7c9 | Blue Swirl | { |
396 | 296af7c9 | Blue Swirl | if (env->stop) {
|
397 | 296af7c9 | Blue Swirl | env->stop = 0;
|
398 | 296af7c9 | Blue Swirl | env->stopped = 1;
|
399 | 296af7c9 | Blue Swirl | qemu_cond_signal(&qemu_pause_cond); |
400 | 296af7c9 | Blue Swirl | } |
401 | e82bcec2 | Marcelo Tosatti | flush_queued_work(env); |
402 | 296af7c9 | Blue Swirl | } |
403 | 296af7c9 | Blue Swirl | |
404 | 296af7c9 | Blue Swirl | static void qemu_wait_io_event(CPUState *env) |
405 | 296af7c9 | Blue Swirl | { |
406 | 296af7c9 | Blue Swirl | while (!tcg_has_work())
|
407 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
|
408 | 296af7c9 | Blue Swirl | |
409 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_global_mutex); |
410 | 296af7c9 | Blue Swirl | |
411 | 296af7c9 | Blue Swirl | /*
|
412 | 296af7c9 | Blue Swirl | * Users of qemu_global_mutex can be starved, having no chance
|
413 | 296af7c9 | Blue Swirl | * to acquire it since this path will get to it first.
|
414 | 296af7c9 | Blue Swirl | * So use another lock to provide fairness.
|
415 | 296af7c9 | Blue Swirl | */
|
416 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_fair_mutex); |
417 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_fair_mutex); |
418 | 296af7c9 | Blue Swirl | |
419 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
420 | 296af7c9 | Blue Swirl | qemu_wait_io_event_common(env); |
421 | 296af7c9 | Blue Swirl | } |
422 | 296af7c9 | Blue Swirl | |
423 | 296af7c9 | Blue Swirl | static void qemu_kvm_eat_signal(CPUState *env, int timeout) |
424 | 296af7c9 | Blue Swirl | { |
425 | 296af7c9 | Blue Swirl | struct timespec ts;
|
426 | 296af7c9 | Blue Swirl | int r, e;
|
427 | 296af7c9 | Blue Swirl | siginfo_t siginfo; |
428 | 296af7c9 | Blue Swirl | sigset_t waitset; |
429 | 296af7c9 | Blue Swirl | |
430 | 296af7c9 | Blue Swirl | ts.tv_sec = timeout / 1000;
|
431 | 296af7c9 | Blue Swirl | ts.tv_nsec = (timeout % 1000) * 1000000; |
432 | 296af7c9 | Blue Swirl | |
433 | 296af7c9 | Blue Swirl | sigemptyset(&waitset); |
434 | 296af7c9 | Blue Swirl | sigaddset(&waitset, SIG_IPI); |
435 | 296af7c9 | Blue Swirl | |
436 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_global_mutex); |
437 | 296af7c9 | Blue Swirl | r = sigtimedwait(&waitset, &siginfo, &ts); |
438 | 296af7c9 | Blue Swirl | e = errno; |
439 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
440 | 296af7c9 | Blue Swirl | |
441 | 296af7c9 | Blue Swirl | if (r == -1 && !(e == EAGAIN || e == EINTR)) { |
442 | 296af7c9 | Blue Swirl | fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
|
443 | 296af7c9 | Blue Swirl | exit(1);
|
444 | 296af7c9 | Blue Swirl | } |
445 | 296af7c9 | Blue Swirl | } |
446 | 296af7c9 | Blue Swirl | |
447 | 296af7c9 | Blue Swirl | static void qemu_kvm_wait_io_event(CPUState *env) |
448 | 296af7c9 | Blue Swirl | { |
449 | 296af7c9 | Blue Swirl | while (!cpu_has_work(env))
|
450 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
|
451 | 296af7c9 | Blue Swirl | |
452 | 296af7c9 | Blue Swirl | qemu_kvm_eat_signal(env, 0);
|
453 | 296af7c9 | Blue Swirl | qemu_wait_io_event_common(env); |
454 | 296af7c9 | Blue Swirl | } |
455 | 296af7c9 | Blue Swirl | |
456 | 296af7c9 | Blue Swirl | static int qemu_cpu_exec(CPUState *env); |
457 | 296af7c9 | Blue Swirl | |
458 | 296af7c9 | Blue Swirl | static void *kvm_cpu_thread_fn(void *arg) |
459 | 296af7c9 | Blue Swirl | { |
460 | 296af7c9 | Blue Swirl | CPUState *env = arg; |
461 | 296af7c9 | Blue Swirl | |
462 | 6164e6d6 | Marcelo Tosatti | qemu_mutex_lock(&qemu_global_mutex); |
463 | 296af7c9 | Blue Swirl | qemu_thread_self(env->thread); |
464 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
465 | 296af7c9 | Blue Swirl | kvm_init_vcpu(env); |
466 | 296af7c9 | Blue Swirl | |
467 | 296af7c9 | Blue Swirl | kvm_block_io_signals(env); |
468 | 296af7c9 | Blue Swirl | |
469 | 296af7c9 | Blue Swirl | /* signal CPU creation */
|
470 | 296af7c9 | Blue Swirl | env->created = 1;
|
471 | 296af7c9 | Blue Swirl | qemu_cond_signal(&qemu_cpu_cond); |
472 | 296af7c9 | Blue Swirl | |
473 | 296af7c9 | Blue Swirl | /* and wait for machine initialization */
|
474 | 296af7c9 | Blue Swirl | while (!qemu_system_ready)
|
475 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
|
476 | 296af7c9 | Blue Swirl | |
477 | 296af7c9 | Blue Swirl | while (1) { |
478 | 296af7c9 | Blue Swirl | if (cpu_can_run(env))
|
479 | 296af7c9 | Blue Swirl | qemu_cpu_exec(env); |
480 | 296af7c9 | Blue Swirl | qemu_kvm_wait_io_event(env); |
481 | 296af7c9 | Blue Swirl | } |
482 | 296af7c9 | Blue Swirl | |
483 | 296af7c9 | Blue Swirl | return NULL; |
484 | 296af7c9 | Blue Swirl | } |
485 | 296af7c9 | Blue Swirl | |
486 | 296af7c9 | Blue Swirl | static void *tcg_cpu_thread_fn(void *arg) |
487 | 296af7c9 | Blue Swirl | { |
488 | 296af7c9 | Blue Swirl | CPUState *env = arg; |
489 | 296af7c9 | Blue Swirl | |
490 | 296af7c9 | Blue Swirl | tcg_block_io_signals(); |
491 | 296af7c9 | Blue Swirl | qemu_thread_self(env->thread); |
492 | 296af7c9 | Blue Swirl | |
493 | 296af7c9 | Blue Swirl | /* signal CPU creation */
|
494 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
495 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) |
496 | 296af7c9 | Blue Swirl | env->created = 1;
|
497 | 296af7c9 | Blue Swirl | qemu_cond_signal(&qemu_cpu_cond); |
498 | 296af7c9 | Blue Swirl | |
499 | 296af7c9 | Blue Swirl | /* and wait for machine initialization */
|
500 | 296af7c9 | Blue Swirl | while (!qemu_system_ready)
|
501 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
|
502 | 296af7c9 | Blue Swirl | |
503 | 296af7c9 | Blue Swirl | while (1) { |
504 | 296af7c9 | Blue Swirl | tcg_cpu_exec(); |
505 | 296af7c9 | Blue Swirl | qemu_wait_io_event(cur_cpu); |
506 | 296af7c9 | Blue Swirl | } |
507 | 296af7c9 | Blue Swirl | |
508 | 296af7c9 | Blue Swirl | return NULL; |
509 | 296af7c9 | Blue Swirl | } |
510 | 296af7c9 | Blue Swirl | |
511 | 296af7c9 | Blue Swirl | void qemu_cpu_kick(void *_env) |
512 | 296af7c9 | Blue Swirl | { |
513 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
514 | 296af7c9 | Blue Swirl | qemu_cond_broadcast(env->halt_cond); |
515 | 1fbb22e5 | Marcelo Tosatti | qemu_thread_signal(env->thread, SIG_IPI); |
516 | 296af7c9 | Blue Swirl | } |
517 | 296af7c9 | Blue Swirl | |
518 | 296af7c9 | Blue Swirl | int qemu_cpu_self(void *_env) |
519 | 296af7c9 | Blue Swirl | { |
520 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
521 | 296af7c9 | Blue Swirl | QemuThread this; |
522 | 296af7c9 | Blue Swirl | |
523 | 296af7c9 | Blue Swirl | qemu_thread_self(&this); |
524 | 296af7c9 | Blue Swirl | |
525 | 296af7c9 | Blue Swirl | return qemu_thread_equal(&this, env->thread);
|
526 | 296af7c9 | Blue Swirl | } |
527 | 296af7c9 | Blue Swirl | |
528 | 296af7c9 | Blue Swirl | static void cpu_signal(int sig) |
529 | 296af7c9 | Blue Swirl | { |
530 | 296af7c9 | Blue Swirl | if (cpu_single_env)
|
531 | 296af7c9 | Blue Swirl | cpu_exit(cpu_single_env); |
532 | 1a28cac3 | Marcelo Tosatti | exit_request = 1;
|
533 | 296af7c9 | Blue Swirl | } |
534 | 296af7c9 | Blue Swirl | |
535 | 296af7c9 | Blue Swirl | static void tcg_block_io_signals(void) |
536 | 296af7c9 | Blue Swirl | { |
537 | 296af7c9 | Blue Swirl | sigset_t set; |
538 | 296af7c9 | Blue Swirl | struct sigaction sigact;
|
539 | 296af7c9 | Blue Swirl | |
540 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
541 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGUSR2); |
542 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGIO); |
543 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGALRM); |
544 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGCHLD); |
545 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_BLOCK, &set, NULL);
|
546 | 296af7c9 | Blue Swirl | |
547 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
548 | 296af7c9 | Blue Swirl | sigaddset(&set, SIG_IPI); |
549 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_UNBLOCK, &set, NULL);
|
550 | 296af7c9 | Blue Swirl | |
551 | 296af7c9 | Blue Swirl | memset(&sigact, 0, sizeof(sigact)); |
552 | 296af7c9 | Blue Swirl | sigact.sa_handler = cpu_signal; |
553 | 296af7c9 | Blue Swirl | sigaction(SIG_IPI, &sigact, NULL);
|
554 | 296af7c9 | Blue Swirl | } |
555 | 296af7c9 | Blue Swirl | |
/* No-op handler installed for SIG_IPI on KVM vCPU threads. */
static void dummy_signal(int sig)
{
}
559 | 296af7c9 | Blue Swirl | |
560 | 296af7c9 | Blue Swirl | static void kvm_block_io_signals(CPUState *env) |
561 | 296af7c9 | Blue Swirl | { |
562 | 296af7c9 | Blue Swirl | int r;
|
563 | 296af7c9 | Blue Swirl | sigset_t set; |
564 | 296af7c9 | Blue Swirl | struct sigaction sigact;
|
565 | 296af7c9 | Blue Swirl | |
566 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
567 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGUSR2); |
568 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGIO); |
569 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGALRM); |
570 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGCHLD); |
571 | 296af7c9 | Blue Swirl | sigaddset(&set, SIG_IPI); |
572 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_BLOCK, &set, NULL);
|
573 | 296af7c9 | Blue Swirl | |
574 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_BLOCK, NULL, &set);
|
575 | 296af7c9 | Blue Swirl | sigdelset(&set, SIG_IPI); |
576 | 296af7c9 | Blue Swirl | |
577 | 296af7c9 | Blue Swirl | memset(&sigact, 0, sizeof(sigact)); |
578 | 296af7c9 | Blue Swirl | sigact.sa_handler = dummy_signal; |
579 | 296af7c9 | Blue Swirl | sigaction(SIG_IPI, &sigact, NULL);
|
580 | 296af7c9 | Blue Swirl | |
581 | 296af7c9 | Blue Swirl | r = kvm_set_signal_mask(env, &set); |
582 | 296af7c9 | Blue Swirl | if (r) {
|
583 | 296af7c9 | Blue Swirl | fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
|
584 | 296af7c9 | Blue Swirl | exit(1);
|
585 | 296af7c9 | Blue Swirl | } |
586 | 296af7c9 | Blue Swirl | } |
587 | 296af7c9 | Blue Swirl | |
588 | 296af7c9 | Blue Swirl | static void unblock_io_signals(void) |
589 | 296af7c9 | Blue Swirl | { |
590 | 296af7c9 | Blue Swirl | sigset_t set; |
591 | 296af7c9 | Blue Swirl | |
592 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
593 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGUSR2); |
594 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGIO); |
595 | 296af7c9 | Blue Swirl | sigaddset(&set, SIGALRM); |
596 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_UNBLOCK, &set, NULL);
|
597 | 296af7c9 | Blue Swirl | |
598 | 296af7c9 | Blue Swirl | sigemptyset(&set); |
599 | 296af7c9 | Blue Swirl | sigaddset(&set, SIG_IPI); |
600 | 296af7c9 | Blue Swirl | pthread_sigmask(SIG_BLOCK, &set, NULL);
|
601 | 296af7c9 | Blue Swirl | } |
602 | 296af7c9 | Blue Swirl | |
603 | 296af7c9 | Blue Swirl | void qemu_mutex_lock_iothread(void) |
604 | 296af7c9 | Blue Swirl | { |
605 | 296af7c9 | Blue Swirl | if (kvm_enabled()) {
|
606 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_fair_mutex); |
607 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
608 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_fair_mutex); |
609 | 1a28cac3 | Marcelo Tosatti | } else {
|
610 | 1a28cac3 | Marcelo Tosatti | qemu_mutex_lock(&qemu_fair_mutex); |
611 | 1a28cac3 | Marcelo Tosatti | if (qemu_mutex_trylock(&qemu_global_mutex)) {
|
612 | 1a28cac3 | Marcelo Tosatti | qemu_thread_signal(tcg_cpu_thread, SIG_IPI); |
613 | 1a28cac3 | Marcelo Tosatti | qemu_mutex_lock(&qemu_global_mutex); |
614 | 1a28cac3 | Marcelo Tosatti | } |
615 | 1a28cac3 | Marcelo Tosatti | qemu_mutex_unlock(&qemu_fair_mutex); |
616 | 1a28cac3 | Marcelo Tosatti | } |
617 | 296af7c9 | Blue Swirl | } |
618 | 296af7c9 | Blue Swirl | |
/* Release the global iothread mutex taken by qemu_mutex_lock_iothread().  */
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
623 | 296af7c9 | Blue Swirl | |
624 | 296af7c9 | Blue Swirl | static int all_vcpus_paused(void) |
625 | 296af7c9 | Blue Swirl | { |
626 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
627 | 296af7c9 | Blue Swirl | |
628 | 296af7c9 | Blue Swirl | while (penv) {
|
629 | 296af7c9 | Blue Swirl | if (!penv->stopped)
|
630 | 296af7c9 | Blue Swirl | return 0; |
631 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
632 | 296af7c9 | Blue Swirl | } |
633 | 296af7c9 | Blue Swirl | |
634 | 296af7c9 | Blue Swirl | return 1; |
635 | 296af7c9 | Blue Swirl | } |
636 | 296af7c9 | Blue Swirl | |
637 | 296af7c9 | Blue Swirl | void pause_all_vcpus(void) |
638 | 296af7c9 | Blue Swirl | { |
639 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
640 | 296af7c9 | Blue Swirl | |
641 | 296af7c9 | Blue Swirl | while (penv) {
|
642 | 296af7c9 | Blue Swirl | penv->stop = 1;
|
643 | 296af7c9 | Blue Swirl | qemu_cpu_kick(penv); |
644 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
645 | 296af7c9 | Blue Swirl | } |
646 | 296af7c9 | Blue Swirl | |
647 | 296af7c9 | Blue Swirl | while (!all_vcpus_paused()) {
|
648 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
|
649 | 296af7c9 | Blue Swirl | penv = first_cpu; |
650 | 296af7c9 | Blue Swirl | while (penv) {
|
651 | 1fbb22e5 | Marcelo Tosatti | qemu_cpu_kick(penv); |
652 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
653 | 296af7c9 | Blue Swirl | } |
654 | 296af7c9 | Blue Swirl | } |
655 | 296af7c9 | Blue Swirl | } |
656 | 296af7c9 | Blue Swirl | |
657 | 296af7c9 | Blue Swirl | void resume_all_vcpus(void) |
658 | 296af7c9 | Blue Swirl | { |
659 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
660 | 296af7c9 | Blue Swirl | |
661 | 296af7c9 | Blue Swirl | while (penv) {
|
662 | 296af7c9 | Blue Swirl | penv->stop = 0;
|
663 | 296af7c9 | Blue Swirl | penv->stopped = 0;
|
664 | 296af7c9 | Blue Swirl | qemu_cpu_kick(penv); |
665 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
666 | 296af7c9 | Blue Swirl | } |
667 | 296af7c9 | Blue Swirl | } |
668 | 296af7c9 | Blue Swirl | |
669 | 296af7c9 | Blue Swirl | static void tcg_init_vcpu(void *_env) |
670 | 296af7c9 | Blue Swirl | { |
671 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
672 | 296af7c9 | Blue Swirl | /* share a single thread for all cpus with TCG */
|
673 | 296af7c9 | Blue Swirl | if (!tcg_cpu_thread) {
|
674 | 296af7c9 | Blue Swirl | env->thread = qemu_mallocz(sizeof(QemuThread));
|
675 | 296af7c9 | Blue Swirl | env->halt_cond = qemu_mallocz(sizeof(QemuCond));
|
676 | 296af7c9 | Blue Swirl | qemu_cond_init(env->halt_cond); |
677 | 296af7c9 | Blue Swirl | qemu_thread_create(env->thread, tcg_cpu_thread_fn, env); |
678 | 296af7c9 | Blue Swirl | while (env->created == 0) |
679 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
|
680 | 296af7c9 | Blue Swirl | tcg_cpu_thread = env->thread; |
681 | 296af7c9 | Blue Swirl | tcg_halt_cond = env->halt_cond; |
682 | 296af7c9 | Blue Swirl | } else {
|
683 | 296af7c9 | Blue Swirl | env->thread = tcg_cpu_thread; |
684 | 296af7c9 | Blue Swirl | env->halt_cond = tcg_halt_cond; |
685 | 296af7c9 | Blue Swirl | } |
686 | 296af7c9 | Blue Swirl | } |
687 | 296af7c9 | Blue Swirl | |
688 | 296af7c9 | Blue Swirl | static void kvm_start_vcpu(CPUState *env) |
689 | 296af7c9 | Blue Swirl | { |
690 | 296af7c9 | Blue Swirl | env->thread = qemu_mallocz(sizeof(QemuThread));
|
691 | 296af7c9 | Blue Swirl | env->halt_cond = qemu_mallocz(sizeof(QemuCond));
|
692 | 296af7c9 | Blue Swirl | qemu_cond_init(env->halt_cond); |
693 | 296af7c9 | Blue Swirl | qemu_thread_create(env->thread, kvm_cpu_thread_fn, env); |
694 | 296af7c9 | Blue Swirl | while (env->created == 0) |
695 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
|
696 | 296af7c9 | Blue Swirl | } |
697 | 296af7c9 | Blue Swirl | |
698 | 296af7c9 | Blue Swirl | void qemu_init_vcpu(void *_env) |
699 | 296af7c9 | Blue Swirl | { |
700 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
701 | 296af7c9 | Blue Swirl | |
702 | 296af7c9 | Blue Swirl | env->nr_cores = smp_cores; |
703 | 296af7c9 | Blue Swirl | env->nr_threads = smp_threads; |
704 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
705 | 296af7c9 | Blue Swirl | kvm_start_vcpu(env); |
706 | 296af7c9 | Blue Swirl | else
|
707 | 296af7c9 | Blue Swirl | tcg_init_vcpu(env); |
708 | 296af7c9 | Blue Swirl | } |
709 | 296af7c9 | Blue Swirl | |
/* Wake the main loop by incrementing the event counter. */
void qemu_notify_event(void)
{
    qemu_event_increment();
}
714 | 296af7c9 | Blue Swirl | |
/* Record a vm-stop reason and wake the main loop to act on it.
 * The reason must be stored before the notification is sent. */
static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}
720 | 296af7c9 | Blue Swirl | |
721 | 296af7c9 | Blue Swirl | void vm_stop(int reason) |
722 | 296af7c9 | Blue Swirl | { |
723 | 296af7c9 | Blue Swirl | QemuThread me; |
724 | 296af7c9 | Blue Swirl | qemu_thread_self(&me); |
725 | 296af7c9 | Blue Swirl | |
726 | 296af7c9 | Blue Swirl | if (!qemu_thread_equal(&me, &io_thread)) {
|
727 | 296af7c9 | Blue Swirl | qemu_system_vmstop_request(reason); |
728 | 296af7c9 | Blue Swirl | /*
|
729 | 296af7c9 | Blue Swirl | * FIXME: should not return to device code in case
|
730 | 296af7c9 | Blue Swirl | * vm_stop() has been requested.
|
731 | 296af7c9 | Blue Swirl | */
|
732 | 296af7c9 | Blue Swirl | if (cpu_single_env) {
|
733 | 296af7c9 | Blue Swirl | cpu_exit(cpu_single_env); |
734 | 296af7c9 | Blue Swirl | cpu_single_env->stop = 1;
|
735 | 296af7c9 | Blue Swirl | } |
736 | 296af7c9 | Blue Swirl | return;
|
737 | 296af7c9 | Blue Swirl | } |
738 | 296af7c9 | Blue Swirl | do_vm_stop(reason); |
739 | 296af7c9 | Blue Swirl | } |
740 | 296af7c9 | Blue Swirl | |
741 | 296af7c9 | Blue Swirl | #endif
|
742 | 296af7c9 | Blue Swirl | |
/*
 * Run guest code on one vcpu via cpu_exec(), maintaining the icount
 * (instruction-count) bookkeeping around the call when use_icount is
 * set, and accumulating profiling time when CONFIG_PROFILER is built in.
 * Returns the value of cpu_exec() (e.g. EXCP_DEBUG).
 */
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        /* Drop any instructions still pending from the previous slice. */
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        /* Budget instructions up to the next timer deadline; the 16-bit
           decrementer holds at most 0xffff, the rest goes to icount_extra. */
        count = qemu_icount_round (qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
780 | 296af7c9 | Blue Swirl | |
/*
 * Round-robin one pass over the vcpu list, executing each runnable
 * vcpu.  Resumes from the global next_cpu cursor so a pass interrupted
 * by an alarm or stop request continues where it left off.  Returns
 * whether any vcpu still has work (tcg_has_work()).
 */
bool tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        /* Disable the vm_clock while single-stepping with NOTIMER set. */
        qemu_clock_enable(vm_clock,
                          (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        /* Yield to the main loop if a timer has fired. */
        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        else if (env->stop)
            break;

        if (ret == EXCP_DEBUG) {
            /* Debug exception: record the stopping vcpu for gdbstub and
               flag the request for the main loop. */
            gdb_set_stop_cpu(env);
            debug_requested = EXCP_DEBUG;
            break;
        }
    }
    return tcg_has_work();
}
808 | 296af7c9 | Blue Swirl | |
809 | 296af7c9 | Blue Swirl | void set_numa_modes(void) |
810 | 296af7c9 | Blue Swirl | { |
811 | 296af7c9 | Blue Swirl | CPUState *env; |
812 | 296af7c9 | Blue Swirl | int i;
|
813 | 296af7c9 | Blue Swirl | |
814 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) { |
815 | 296af7c9 | Blue Swirl | for (i = 0; i < nb_numa_nodes; i++) { |
816 | 296af7c9 | Blue Swirl | if (node_cpumask[i] & (1 << env->cpu_index)) { |
817 | 296af7c9 | Blue Swirl | env->numa_node = i; |
818 | 296af7c9 | Blue Swirl | } |
819 | 296af7c9 | Blue Swirl | } |
820 | 296af7c9 | Blue Swirl | } |
821 | 296af7c9 | Blue Swirl | } |
822 | 296af7c9 | Blue Swirl | |
823 | 296af7c9 | Blue Swirl | void set_cpu_log(const char *optarg) |
824 | 296af7c9 | Blue Swirl | { |
825 | 296af7c9 | Blue Swirl | int mask;
|
826 | 296af7c9 | Blue Swirl | const CPULogItem *item;
|
827 | 296af7c9 | Blue Swirl | |
828 | 296af7c9 | Blue Swirl | mask = cpu_str_to_log_mask(optarg); |
829 | 296af7c9 | Blue Swirl | if (!mask) {
|
830 | 296af7c9 | Blue Swirl | printf("Log items (comma separated):\n");
|
831 | 296af7c9 | Blue Swirl | for (item = cpu_log_items; item->mask != 0; item++) { |
832 | 296af7c9 | Blue Swirl | printf("%-10s %s\n", item->name, item->help);
|
833 | 296af7c9 | Blue Swirl | } |
834 | 296af7c9 | Blue Swirl | exit(1);
|
835 | 296af7c9 | Blue Swirl | } |
836 | 296af7c9 | Blue Swirl | cpu_set_log(mask); |
837 | 296af7c9 | Blue Swirl | } |
838 | 29e922b6 | Blue Swirl | |
839 | 29e922b6 | Blue Swirl | /* Return the virtual CPU time, based on the instruction counter. */
|
840 | 29e922b6 | Blue Swirl | int64_t cpu_get_icount(void)
|
841 | 29e922b6 | Blue Swirl | { |
842 | 29e922b6 | Blue Swirl | int64_t icount; |
843 | 29e922b6 | Blue Swirl | CPUState *env = cpu_single_env;; |
844 | 29e922b6 | Blue Swirl | |
845 | 29e922b6 | Blue Swirl | icount = qemu_icount; |
846 | 29e922b6 | Blue Swirl | if (env) {
|
847 | 29e922b6 | Blue Swirl | if (!can_do_io(env)) {
|
848 | 29e922b6 | Blue Swirl | fprintf(stderr, "Bad clock read\n");
|
849 | 29e922b6 | Blue Swirl | } |
850 | 29e922b6 | Blue Swirl | icount -= (env->icount_decr.u16.low + env->icount_extra); |
851 | 29e922b6 | Blue Swirl | } |
852 | 29e922b6 | Blue Swirl | return qemu_icount_bias + (icount << icount_time_shift);
|
853 | 29e922b6 | Blue Swirl | } |
854 | 262353cb | Blue Swirl | |
/*
 * Print the list of CPU models supported by the current target to f,
 * using the given fprintf-like callback.  The actual lister is chosen
 * at compile time: cpu_list_id() (takes optarg) when available,
 * otherwise the deprecated cpu_list(); targets defining neither print
 * nothing.
 */
void list_cpus(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
               const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}