root / cpus.c @ 55274a30
History | View | Annotate | Download (17.5 kB)
1 | 296af7c9 | Blue Swirl | /*
|
---|---|---|---|
2 | 296af7c9 | Blue Swirl | * QEMU System Emulator
|
3 | 296af7c9 | Blue Swirl | *
|
4 | 296af7c9 | Blue Swirl | * Copyright (c) 2003-2008 Fabrice Bellard
|
5 | 296af7c9 | Blue Swirl | *
|
6 | 296af7c9 | Blue Swirl | * Permission is hereby granted, free of charge, to any person obtaining a copy
|
7 | 296af7c9 | Blue Swirl | * of this software and associated documentation files (the "Software"), to deal
|
8 | 296af7c9 | Blue Swirl | * in the Software without restriction, including without limitation the rights
|
9 | 296af7c9 | Blue Swirl | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 | 296af7c9 | Blue Swirl | * copies of the Software, and to permit persons to whom the Software is
|
11 | 296af7c9 | Blue Swirl | * furnished to do so, subject to the following conditions:
|
12 | 296af7c9 | Blue Swirl | *
|
13 | 296af7c9 | Blue Swirl | * The above copyright notice and this permission notice shall be included in
|
14 | 296af7c9 | Blue Swirl | * all copies or substantial portions of the Software.
|
15 | 296af7c9 | Blue Swirl | *
|
16 | 296af7c9 | Blue Swirl | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
17 | 296af7c9 | Blue Swirl | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
18 | 296af7c9 | Blue Swirl | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
19 | 296af7c9 | Blue Swirl | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
20 | 296af7c9 | Blue Swirl | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
21 | 296af7c9 | Blue Swirl | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
22 | 296af7c9 | Blue Swirl | * THE SOFTWARE.
|
23 | 296af7c9 | Blue Swirl | */
|
24 | 296af7c9 | Blue Swirl | |
25 | 296af7c9 | Blue Swirl | /* Needed early for CONFIG_BSD etc. */
|
26 | 296af7c9 | Blue Swirl | #include "config-host.h" |
27 | 296af7c9 | Blue Swirl | |
28 | 296af7c9 | Blue Swirl | #include "monitor.h" |
29 | 296af7c9 | Blue Swirl | #include "sysemu.h" |
30 | 296af7c9 | Blue Swirl | #include "gdbstub.h" |
31 | 296af7c9 | Blue Swirl | #include "dma.h" |
32 | 296af7c9 | Blue Swirl | #include "kvm.h" |
33 | 296af7c9 | Blue Swirl | |
34 | 296af7c9 | Blue Swirl | #include "cpus.h" |
35 | 296af7c9 | Blue Swirl | |
/* Round-robin state for the single-threaded TCG executor: cur_cpu is the
 * CPU whose time slice most recently ran, next_cpu the one that resumes
 * on the next iteration (also kicked from qemu_notify_event below). */
static CPUState *cur_cpu;
static CPUState *next_cpu;
38 | 296af7c9 | Blue Swirl | |
39 | 296af7c9 | Blue Swirl | /***********************************************************/
|
40 | 296af7c9 | Blue Swirl | void hw_error(const char *fmt, ...) |
41 | 296af7c9 | Blue Swirl | { |
42 | 296af7c9 | Blue Swirl | va_list ap; |
43 | 296af7c9 | Blue Swirl | CPUState *env; |
44 | 296af7c9 | Blue Swirl | |
45 | 296af7c9 | Blue Swirl | va_start(ap, fmt); |
46 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu: hardware error: ");
|
47 | 296af7c9 | Blue Swirl | vfprintf(stderr, fmt, ap); |
48 | 296af7c9 | Blue Swirl | fprintf(stderr, "\n");
|
49 | 296af7c9 | Blue Swirl | for(env = first_cpu; env != NULL; env = env->next_cpu) { |
50 | 296af7c9 | Blue Swirl | fprintf(stderr, "CPU #%d:\n", env->cpu_index);
|
51 | 296af7c9 | Blue Swirl | #ifdef TARGET_I386
|
52 | 296af7c9 | Blue Swirl | cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU); |
53 | 296af7c9 | Blue Swirl | #else
|
54 | 296af7c9 | Blue Swirl | cpu_dump_state(env, stderr, fprintf, 0);
|
55 | 296af7c9 | Blue Swirl | #endif
|
56 | 296af7c9 | Blue Swirl | } |
57 | 296af7c9 | Blue Swirl | va_end(ap); |
58 | 296af7c9 | Blue Swirl | abort(); |
59 | 296af7c9 | Blue Swirl | } |
60 | 296af7c9 | Blue Swirl | |
61 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_states(void) |
62 | 296af7c9 | Blue Swirl | { |
63 | 296af7c9 | Blue Swirl | CPUState *cpu; |
64 | 296af7c9 | Blue Swirl | |
65 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
66 | 296af7c9 | Blue Swirl | cpu_synchronize_state(cpu); |
67 | 296af7c9 | Blue Swirl | } |
68 | 296af7c9 | Blue Swirl | } |
69 | 296af7c9 | Blue Swirl | |
70 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_post_reset(void) |
71 | 296af7c9 | Blue Swirl | { |
72 | 296af7c9 | Blue Swirl | CPUState *cpu; |
73 | 296af7c9 | Blue Swirl | |
74 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
75 | 296af7c9 | Blue Swirl | cpu_synchronize_post_reset(cpu); |
76 | 296af7c9 | Blue Swirl | } |
77 | 296af7c9 | Blue Swirl | } |
78 | 296af7c9 | Blue Swirl | |
79 | 296af7c9 | Blue Swirl | void cpu_synchronize_all_post_init(void) |
80 | 296af7c9 | Blue Swirl | { |
81 | 296af7c9 | Blue Swirl | CPUState *cpu; |
82 | 296af7c9 | Blue Swirl | |
83 | 296af7c9 | Blue Swirl | for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
|
84 | 296af7c9 | Blue Swirl | cpu_synchronize_post_init(cpu); |
85 | 296af7c9 | Blue Swirl | } |
86 | 296af7c9 | Blue Swirl | } |
87 | 296af7c9 | Blue Swirl | |
88 | 296af7c9 | Blue Swirl | static void do_vm_stop(int reason) |
89 | 296af7c9 | Blue Swirl | { |
90 | 296af7c9 | Blue Swirl | if (vm_running) {
|
91 | 296af7c9 | Blue Swirl | cpu_disable_ticks(); |
92 | 296af7c9 | Blue Swirl | vm_running = 0;
|
93 | 296af7c9 | Blue Swirl | pause_all_vcpus(); |
94 | 296af7c9 | Blue Swirl | vm_state_notify(0, reason);
|
95 | 296af7c9 | Blue Swirl | monitor_protocol_event(QEVENT_STOP, NULL);
|
96 | 296af7c9 | Blue Swirl | } |
97 | 296af7c9 | Blue Swirl | } |
98 | 296af7c9 | Blue Swirl | |
99 | 296af7c9 | Blue Swirl | static int cpu_can_run(CPUState *env) |
100 | 296af7c9 | Blue Swirl | { |
101 | 296af7c9 | Blue Swirl | if (env->stop)
|
102 | 296af7c9 | Blue Swirl | return 0; |
103 | 55274a30 | Paolo Bonzini | if (env->stopped || !vm_running)
|
104 | 296af7c9 | Blue Swirl | return 0; |
105 | 296af7c9 | Blue Swirl | return 1; |
106 | 296af7c9 | Blue Swirl | } |
107 | 296af7c9 | Blue Swirl | |
108 | 296af7c9 | Blue Swirl | static int cpu_has_work(CPUState *env) |
109 | 296af7c9 | Blue Swirl | { |
110 | 296af7c9 | Blue Swirl | if (env->stop)
|
111 | 296af7c9 | Blue Swirl | return 1; |
112 | 55274a30 | Paolo Bonzini | if (env->stopped || !vm_running)
|
113 | 296af7c9 | Blue Swirl | return 0; |
114 | 296af7c9 | Blue Swirl | if (!env->halted)
|
115 | 296af7c9 | Blue Swirl | return 1; |
116 | 296af7c9 | Blue Swirl | if (qemu_cpu_has_work(env))
|
117 | 296af7c9 | Blue Swirl | return 1; |
118 | 296af7c9 | Blue Swirl | return 0; |
119 | 296af7c9 | Blue Swirl | } |
120 | 296af7c9 | Blue Swirl | |
121 | 296af7c9 | Blue Swirl | static int tcg_has_work(void) |
122 | 296af7c9 | Blue Swirl | { |
123 | 296af7c9 | Blue Swirl | CPUState *env; |
124 | 296af7c9 | Blue Swirl | |
125 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) |
126 | 296af7c9 | Blue Swirl | if (cpu_has_work(env))
|
127 | 296af7c9 | Blue Swirl | return 1; |
128 | 296af7c9 | Blue Swirl | return 0; |
129 | 296af7c9 | Blue Swirl | } |
130 | 296af7c9 | Blue Swirl | |
131 | 296af7c9 | Blue Swirl | #ifndef _WIN32
|
/* Write end of the main-loop notification pipe/eventfd; -1 until
 * qemu_event_init() has run. */
static int io_thread_fd = -1;

/*
 * Wake the main loop by writing to the notification fd.
 *
 * No-op before qemu_event_init().  Retries on EINTR; EAGAIN means the
 * pipe is already full, which is fine because a wakeup is then already
 * pending.  Any other write error is fatal.
 *
 * Fix: the error message said "write() filed" — corrected to "failed".
 */
static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1) {
        return;
    }

    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}
154 | 296af7c9 | Blue Swirl | |
/*
 * fd-handler for the notification fd: drain everything pending so the
 * fd stops polling readable.  For eventfd a single 8-byte read empties
 * it; for a pipe we keep reading while full buffers come back.
 */
static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    char buffer[512];
    ssize_t len;

    /* Drain the notify pipe. For eventfd, only 8 bytes will be read. */
    for (;;) {
        len = read(fd, buffer, sizeof(buffer));
        if (len == -1 && errno == EINTR) {
            continue;       /* interrupted: retry */
        }
        if (len != sizeof(buffer)) {
            break;          /* short read or error: pipe is drained */
        }
    }
}
166 | 296af7c9 | Blue Swirl | |
167 | 296af7c9 | Blue Swirl | static int qemu_event_init(void) |
168 | 296af7c9 | Blue Swirl | { |
169 | 296af7c9 | Blue Swirl | int err;
|
170 | 296af7c9 | Blue Swirl | int fds[2]; |
171 | 296af7c9 | Blue Swirl | |
172 | 296af7c9 | Blue Swirl | err = qemu_eventfd(fds); |
173 | 296af7c9 | Blue Swirl | if (err == -1) |
174 | 296af7c9 | Blue Swirl | return -errno;
|
175 | 296af7c9 | Blue Swirl | |
176 | 296af7c9 | Blue Swirl | err = fcntl_setfl(fds[0], O_NONBLOCK);
|
177 | 296af7c9 | Blue Swirl | if (err < 0) |
178 | 296af7c9 | Blue Swirl | goto fail;
|
179 | 296af7c9 | Blue Swirl | |
180 | 296af7c9 | Blue Swirl | err = fcntl_setfl(fds[1], O_NONBLOCK);
|
181 | 296af7c9 | Blue Swirl | if (err < 0) |
182 | 296af7c9 | Blue Swirl | goto fail;
|
183 | 296af7c9 | Blue Swirl | |
184 | 296af7c9 | Blue Swirl | qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL, |
185 | 296af7c9 | Blue Swirl | (void *)(unsigned long)fds[0]); |
186 | 296af7c9 | Blue Swirl | |
187 | 296af7c9 | Blue Swirl | io_thread_fd = fds[1];
|
188 | 296af7c9 | Blue Swirl | return 0; |
189 | 296af7c9 | Blue Swirl | |
190 | 296af7c9 | Blue Swirl | fail:
|
191 | 296af7c9 | Blue Swirl | close(fds[0]);
|
192 | 296af7c9 | Blue Swirl | close(fds[1]);
|
193 | 296af7c9 | Blue Swirl | return err;
|
194 | 296af7c9 | Blue Swirl | } |
195 | 296af7c9 | Blue Swirl | #else
|
/* Win32 main-loop wakeup event (auto-reset, created in qemu_event_init). */
HANDLE qemu_event_handle;

/* qemu_add_wait_object() requires a callback; being woken from the wait
 * is all we need, so the handler itself does nothing. */
static void dummy_event_handler(void *opaque)
{
}
201 | 296af7c9 | Blue Swirl | |
202 | 296af7c9 | Blue Swirl | static int qemu_event_init(void) |
203 | 296af7c9 | Blue Swirl | { |
204 | 296af7c9 | Blue Swirl | qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL); |
205 | 296af7c9 | Blue Swirl | if (!qemu_event_handle) {
|
206 | 296af7c9 | Blue Swirl | fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
|
207 | 296af7c9 | Blue Swirl | return -1; |
208 | 296af7c9 | Blue Swirl | } |
209 | 296af7c9 | Blue Swirl | qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
|
210 | 296af7c9 | Blue Swirl | return 0; |
211 | 296af7c9 | Blue Swirl | } |
212 | 296af7c9 | Blue Swirl | |
213 | 296af7c9 | Blue Swirl | static void qemu_event_increment(void) |
214 | 296af7c9 | Blue Swirl | { |
215 | 296af7c9 | Blue Swirl | if (!SetEvent(qemu_event_handle)) {
|
216 | 296af7c9 | Blue Swirl | fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
|
217 | 296af7c9 | Blue Swirl | GetLastError()); |
218 | 296af7c9 | Blue Swirl | exit (1);
|
219 | 296af7c9 | Blue Swirl | } |
220 | 296af7c9 | Blue Swirl | } |
221 | 296af7c9 | Blue Swirl | #endif
|
222 | 296af7c9 | Blue Swirl | |
223 | 296af7c9 | Blue Swirl | #ifndef CONFIG_IOTHREAD
|
/* Non-iothread build: the only main-loop setup needed is the wakeup
 * event/pipe.  Returns 0 on success, negative errno on failure. */
int qemu_init_main_loop(void)
{
    return qemu_event_init();
}
228 | 296af7c9 | Blue Swirl | |
229 | 296af7c9 | Blue Swirl | void qemu_init_vcpu(void *_env) |
230 | 296af7c9 | Blue Swirl | { |
231 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
232 | 296af7c9 | Blue Swirl | |
233 | 296af7c9 | Blue Swirl | env->nr_cores = smp_cores; |
234 | 296af7c9 | Blue Swirl | env->nr_threads = smp_threads; |
235 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
236 | 296af7c9 | Blue Swirl | kvm_init_vcpu(env); |
237 | 296af7c9 | Blue Swirl | return;
|
238 | 296af7c9 | Blue Swirl | } |
239 | 296af7c9 | Blue Swirl | |
/* Non-iothread build: there is only one thread, so the caller is always
 * "on" the CPU's thread. */
int qemu_cpu_self(void *env)
{
    return 1;
}
244 | 296af7c9 | Blue Swirl | |
/* No-op without an iothread: there are no vCPU threads to resume. */
void resume_all_vcpus(void)
{
}
248 | 296af7c9 | Blue Swirl | |
/* No-op without an iothread: there are no vCPU threads to pause. */
void pause_all_vcpus(void)
{
}
252 | 296af7c9 | Blue Swirl | |
/* No-op without an iothread: there is no per-CPU thread to kick. */
void qemu_cpu_kick(void *env)
{
}
257 | 296af7c9 | Blue Swirl | |
258 | 296af7c9 | Blue Swirl | void qemu_notify_event(void) |
259 | 296af7c9 | Blue Swirl | { |
260 | 296af7c9 | Blue Swirl | CPUState *env = cpu_single_env; |
261 | 296af7c9 | Blue Swirl | |
262 | 296af7c9 | Blue Swirl | qemu_event_increment (); |
263 | 296af7c9 | Blue Swirl | if (env) {
|
264 | 296af7c9 | Blue Swirl | cpu_exit(env); |
265 | 296af7c9 | Blue Swirl | } |
266 | 296af7c9 | Blue Swirl | if (next_cpu && env != next_cpu) {
|
267 | 296af7c9 | Blue Swirl | cpu_exit(next_cpu); |
268 | 296af7c9 | Blue Swirl | } |
269 | 296af7c9 | Blue Swirl | } |
270 | 296af7c9 | Blue Swirl | |
/* Single-threaded build: there is no iothread lock, so both are no-ops. */
void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}
273 | 296af7c9 | Blue Swirl | |
/* Non-iothread build: everything runs on one thread, so the stop can be
 * performed directly. */
void vm_stop(int reason)
{
    do_vm_stop(reason);
}
278 | 296af7c9 | Blue Swirl | |
279 | 296af7c9 | Blue Swirl | #else /* CONFIG_IOTHREAD */ |
280 | 296af7c9 | Blue Swirl | |
281 | 296af7c9 | Blue Swirl | #include "qemu-thread.h" |
282 | 296af7c9 | Blue Swirl | |
/* Big lock protecting device/VM state; held while a vCPU runs. */
QemuMutex qemu_global_mutex;
/* Hand-off lock used to keep qemu_global_mutex acquisition fair between
 * the iothread and the CPU threads (see qemu_wait_io_event /
 * qemu_signal_lock below). */
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

/* All TCG CPUs share one thread and one halt condition variable. */
static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* Set once machine init is done; CPU threads wait for it before running. */
static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;

static void tcg_block_io_signals(void);
static void kvm_block_io_signals(CPUState *env);
static void unblock_io_signals(void);
302 | 296af7c9 | Blue Swirl | int qemu_init_main_loop(void) |
303 | 296af7c9 | Blue Swirl | { |
304 | 296af7c9 | Blue Swirl | int ret;
|
305 | 296af7c9 | Blue Swirl | |
306 | 296af7c9 | Blue Swirl | ret = qemu_event_init(); |
307 | 296af7c9 | Blue Swirl | if (ret)
|
308 | 296af7c9 | Blue Swirl | return ret;
|
309 | 296af7c9 | Blue Swirl | |
310 | 296af7c9 | Blue Swirl | qemu_cond_init(&qemu_pause_cond); |
311 | 296af7c9 | Blue Swirl | qemu_mutex_init(&qemu_fair_mutex); |
312 | 296af7c9 | Blue Swirl | qemu_mutex_init(&qemu_global_mutex); |
313 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
314 | 296af7c9 | Blue Swirl | |
315 | 296af7c9 | Blue Swirl | unblock_io_signals(); |
316 | 296af7c9 | Blue Swirl | qemu_thread_self(&io_thread); |
317 | 296af7c9 | Blue Swirl | |
318 | 296af7c9 | Blue Swirl | return 0; |
319 | 296af7c9 | Blue Swirl | } |
320 | 296af7c9 | Blue Swirl | |
321 | 296af7c9 | Blue Swirl | static void qemu_wait_io_event_common(CPUState *env) |
322 | 296af7c9 | Blue Swirl | { |
323 | 296af7c9 | Blue Swirl | if (env->stop) {
|
324 | 296af7c9 | Blue Swirl | env->stop = 0;
|
325 | 296af7c9 | Blue Swirl | env->stopped = 1;
|
326 | 296af7c9 | Blue Swirl | qemu_cond_signal(&qemu_pause_cond); |
327 | 296af7c9 | Blue Swirl | } |
328 | 296af7c9 | Blue Swirl | } |
329 | 296af7c9 | Blue Swirl | |
330 | 296af7c9 | Blue Swirl | static void qemu_wait_io_event(CPUState *env) |
331 | 296af7c9 | Blue Swirl | { |
332 | 296af7c9 | Blue Swirl | while (!tcg_has_work())
|
333 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
|
334 | 296af7c9 | Blue Swirl | |
335 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_global_mutex); |
336 | 296af7c9 | Blue Swirl | |
337 | 296af7c9 | Blue Swirl | /*
|
338 | 296af7c9 | Blue Swirl | * Users of qemu_global_mutex can be starved, having no chance
|
339 | 296af7c9 | Blue Swirl | * to acquire it since this path will get to it first.
|
340 | 296af7c9 | Blue Swirl | * So use another lock to provide fairness.
|
341 | 296af7c9 | Blue Swirl | */
|
342 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_fair_mutex); |
343 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_fair_mutex); |
344 | 296af7c9 | Blue Swirl | |
345 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
346 | 296af7c9 | Blue Swirl | qemu_wait_io_event_common(env); |
347 | 296af7c9 | Blue Swirl | } |
348 | 296af7c9 | Blue Swirl | |
/*
 * Consume one pending SIG_IPI (if any), waiting up to `timeout`
 * milliseconds.  The global mutex is dropped around sigtimedwait() so
 * other threads can run while we block.  EAGAIN (timeout) and EINTR are
 * expected; any other sigtimedwait() failure is fatal.
 */
static void qemu_kvm_eat_signal(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    qemu_mutex_unlock(&qemu_global_mutex);
    r = sigtimedwait(&waitset, &siginfo, &ts);
    /* Save errno before re-acquiring the lock, which may clobber it. */
    e = errno;
    qemu_mutex_lock(&qemu_global_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
        exit(1);
    }
}
372 | 296af7c9 | Blue Swirl | |
373 | 296af7c9 | Blue Swirl | static void qemu_kvm_wait_io_event(CPUState *env) |
374 | 296af7c9 | Blue Swirl | { |
375 | 296af7c9 | Blue Swirl | while (!cpu_has_work(env))
|
376 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
|
377 | 296af7c9 | Blue Swirl | |
378 | 296af7c9 | Blue Swirl | qemu_kvm_eat_signal(env, 0);
|
379 | 296af7c9 | Blue Swirl | qemu_wait_io_event_common(env); |
380 | 296af7c9 | Blue Swirl | } |
381 | 296af7c9 | Blue Swirl | |
/* Forward declaration; as a static, the definition lives later in this
 * file (outside this chunk).  Runs one CPU's execution slice. */
static int qemu_cpu_exec(CPUState *env);
/*
 * Thread entry point for one KVM vCPU.
 *
 * Startup handshake: record the thread identity, create the kernel vcpu,
 * install the signal masks, then (under the global mutex) set
 * env->created and signal qemu_cpu_cond so qemu_init_vcpu() can proceed,
 * and wait for qemu_system_ready before entering the run loop.  The loop
 * alternates between executing guest code (when cpu_can_run allows) and
 * sleeping/servicing events; it never exits.
 */
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_thread_self(env->thread);
    if (kvm_enabled())
        kvm_init_vcpu(env);

    kvm_block_io_signals(env);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    /* not reached */
    return NULL;
}
411 | 296af7c9 | Blue Swirl | |
412 | 296af7c9 | Blue Swirl | static void *tcg_cpu_thread_fn(void *arg) |
413 | 296af7c9 | Blue Swirl | { |
414 | 296af7c9 | Blue Swirl | CPUState *env = arg; |
415 | 296af7c9 | Blue Swirl | |
416 | 296af7c9 | Blue Swirl | tcg_block_io_signals(); |
417 | 296af7c9 | Blue Swirl | qemu_thread_self(env->thread); |
418 | 296af7c9 | Blue Swirl | |
419 | 296af7c9 | Blue Swirl | /* signal CPU creation */
|
420 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
421 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) |
422 | 296af7c9 | Blue Swirl | env->created = 1;
|
423 | 296af7c9 | Blue Swirl | qemu_cond_signal(&qemu_cpu_cond); |
424 | 296af7c9 | Blue Swirl | |
425 | 296af7c9 | Blue Swirl | /* and wait for machine initialization */
|
426 | 296af7c9 | Blue Swirl | while (!qemu_system_ready)
|
427 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
|
428 | 296af7c9 | Blue Swirl | |
429 | 296af7c9 | Blue Swirl | while (1) { |
430 | 296af7c9 | Blue Swirl | tcg_cpu_exec(); |
431 | 296af7c9 | Blue Swirl | qemu_wait_io_event(cur_cpu); |
432 | 296af7c9 | Blue Swirl | } |
433 | 296af7c9 | Blue Swirl | |
434 | 296af7c9 | Blue Swirl | return NULL; |
435 | 296af7c9 | Blue Swirl | } |
436 | 296af7c9 | Blue Swirl | |
437 | 296af7c9 | Blue Swirl | void qemu_cpu_kick(void *_env) |
438 | 296af7c9 | Blue Swirl | { |
439 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
440 | 296af7c9 | Blue Swirl | qemu_cond_broadcast(env->halt_cond); |
441 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
442 | 296af7c9 | Blue Swirl | qemu_thread_signal(env->thread, SIG_IPI); |
443 | 296af7c9 | Blue Swirl | } |
444 | 296af7c9 | Blue Swirl | |
445 | 296af7c9 | Blue Swirl | int qemu_cpu_self(void *_env) |
446 | 296af7c9 | Blue Swirl | { |
447 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
448 | 296af7c9 | Blue Swirl | QemuThread this; |
449 | 296af7c9 | Blue Swirl | |
450 | 296af7c9 | Blue Swirl | qemu_thread_self(&this); |
451 | 296af7c9 | Blue Swirl | |
452 | 296af7c9 | Blue Swirl | return qemu_thread_equal(&this, env->thread);
|
453 | 296af7c9 | Blue Swirl | } |
454 | 296af7c9 | Blue Swirl | |
455 | 296af7c9 | Blue Swirl | static void cpu_signal(int sig) |
456 | 296af7c9 | Blue Swirl | { |
457 | 296af7c9 | Blue Swirl | if (cpu_single_env)
|
458 | 296af7c9 | Blue Swirl | cpu_exit(cpu_single_env); |
459 | 296af7c9 | Blue Swirl | } |
460 | 296af7c9 | Blue Swirl | |
/*
 * Signal setup for the TCG CPU thread: block the I/O-related signals
 * (those are the iothread's job), unblock SIG_IPI, and install
 * cpu_signal as its handler so a kick interrupts guest execution.
 */
static void tcg_block_io_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGCHLD);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);
}
481 | 296af7c9 | Blue Swirl | |
/* Handler for SIG_IPI on KVM threads: the delivery itself (interrupting
 * a blocking call) is the point, so the body is empty. */
static void dummy_signal(int sig)
{
}
485 | 296af7c9 | Blue Swirl | |
/*
 * Signal setup for a KVM vCPU thread: block the I/O signals and SIG_IPI
 * at the pthread level, then derive a second mask (the current mask minus
 * SIG_IPI) and hand it to the kernel via kvm_set_signal_mask() —
 * presumably applied while the vcpu runs so SIG_IPI can interrupt it;
 * confirm against kvm_set_signal_mask().  A failure there is fatal.
 */
static void kvm_block_io_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGCHLD);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    /* Read back the full thread mask and drop SIG_IPI from it. */
    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
        exit(1);
    }
}
513 | 296af7c9 | Blue Swirl | |
/*
 * Signal setup for the iothread itself: accept the I/O signals and block
 * SIG_IPI, which is reserved for kicking the CPU threads.
 */
static void unblock_io_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
}
528 | 296af7c9 | Blue Swirl | |
/*
 * Acquire qemu_global_mutex while the TCG thread may be holding it.
 * Repeatedly signal the TCG thread with SIG_IPI (its handler exits the
 * guest-code loop) and retry with a timed lock of `msecs` milliseconds
 * until acquisition succeeds.  qemu_fair_mutex is held across the attempt
 * so the CPU thread's fairness hand-off (see qemu_wait_io_event) lets us
 * in.
 */
static void qemu_signal_lock(unsigned int msecs)
{
    qemu_mutex_lock(&qemu_fair_mutex);

    while (qemu_mutex_trylock(&qemu_global_mutex)) {
        qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
        /* qemu_mutex_timedlock returns 0 on success */
        if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
            break;
    }
    qemu_mutex_unlock(&qemu_fair_mutex);
}
540 | 296af7c9 | Blue Swirl | |
541 | 296af7c9 | Blue Swirl | void qemu_mutex_lock_iothread(void) |
542 | 296af7c9 | Blue Swirl | { |
543 | 296af7c9 | Blue Swirl | if (kvm_enabled()) {
|
544 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_fair_mutex); |
545 | 296af7c9 | Blue Swirl | qemu_mutex_lock(&qemu_global_mutex); |
546 | 296af7c9 | Blue Swirl | qemu_mutex_unlock(&qemu_fair_mutex); |
547 | 296af7c9 | Blue Swirl | } else
|
548 | 296af7c9 | Blue Swirl | qemu_signal_lock(100);
|
549 | 296af7c9 | Blue Swirl | } |
550 | 296af7c9 | Blue Swirl | |
/* Release the big lock taken by qemu_mutex_lock_iothread(). */
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
555 | 296af7c9 | Blue Swirl | |
556 | 296af7c9 | Blue Swirl | static int all_vcpus_paused(void) |
557 | 296af7c9 | Blue Swirl | { |
558 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
559 | 296af7c9 | Blue Swirl | |
560 | 296af7c9 | Blue Swirl | while (penv) {
|
561 | 296af7c9 | Blue Swirl | if (!penv->stopped)
|
562 | 296af7c9 | Blue Swirl | return 0; |
563 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
564 | 296af7c9 | Blue Swirl | } |
565 | 296af7c9 | Blue Swirl | |
566 | 296af7c9 | Blue Swirl | return 1; |
567 | 296af7c9 | Blue Swirl | } |
568 | 296af7c9 | Blue Swirl | |
569 | 296af7c9 | Blue Swirl | void pause_all_vcpus(void) |
570 | 296af7c9 | Blue Swirl | { |
571 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
572 | 296af7c9 | Blue Swirl | |
573 | 296af7c9 | Blue Swirl | while (penv) {
|
574 | 296af7c9 | Blue Swirl | penv->stop = 1;
|
575 | 296af7c9 | Blue Swirl | qemu_thread_signal(penv->thread, SIG_IPI); |
576 | 296af7c9 | Blue Swirl | qemu_cpu_kick(penv); |
577 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
578 | 296af7c9 | Blue Swirl | } |
579 | 296af7c9 | Blue Swirl | |
580 | 296af7c9 | Blue Swirl | while (!all_vcpus_paused()) {
|
581 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
|
582 | 296af7c9 | Blue Swirl | penv = first_cpu; |
583 | 296af7c9 | Blue Swirl | while (penv) {
|
584 | 296af7c9 | Blue Swirl | qemu_thread_signal(penv->thread, SIG_IPI); |
585 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
586 | 296af7c9 | Blue Swirl | } |
587 | 296af7c9 | Blue Swirl | } |
588 | 296af7c9 | Blue Swirl | } |
589 | 296af7c9 | Blue Swirl | |
590 | 296af7c9 | Blue Swirl | void resume_all_vcpus(void) |
591 | 296af7c9 | Blue Swirl | { |
592 | 296af7c9 | Blue Swirl | CPUState *penv = first_cpu; |
593 | 296af7c9 | Blue Swirl | |
594 | 296af7c9 | Blue Swirl | while (penv) {
|
595 | 296af7c9 | Blue Swirl | penv->stop = 0;
|
596 | 296af7c9 | Blue Swirl | penv->stopped = 0;
|
597 | 296af7c9 | Blue Swirl | qemu_thread_signal(penv->thread, SIG_IPI); |
598 | 296af7c9 | Blue Swirl | qemu_cpu_kick(penv); |
599 | 296af7c9 | Blue Swirl | penv = (CPUState *)penv->next_cpu; |
600 | 296af7c9 | Blue Swirl | } |
601 | 296af7c9 | Blue Swirl | } |
602 | 296af7c9 | Blue Swirl | |
603 | 296af7c9 | Blue Swirl | static void tcg_init_vcpu(void *_env) |
604 | 296af7c9 | Blue Swirl | { |
605 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
606 | 296af7c9 | Blue Swirl | /* share a single thread for all cpus with TCG */
|
607 | 296af7c9 | Blue Swirl | if (!tcg_cpu_thread) {
|
608 | 296af7c9 | Blue Swirl | env->thread = qemu_mallocz(sizeof(QemuThread));
|
609 | 296af7c9 | Blue Swirl | env->halt_cond = qemu_mallocz(sizeof(QemuCond));
|
610 | 296af7c9 | Blue Swirl | qemu_cond_init(env->halt_cond); |
611 | 296af7c9 | Blue Swirl | qemu_thread_create(env->thread, tcg_cpu_thread_fn, env); |
612 | 296af7c9 | Blue Swirl | while (env->created == 0) |
613 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
|
614 | 296af7c9 | Blue Swirl | tcg_cpu_thread = env->thread; |
615 | 296af7c9 | Blue Swirl | tcg_halt_cond = env->halt_cond; |
616 | 296af7c9 | Blue Swirl | } else {
|
617 | 296af7c9 | Blue Swirl | env->thread = tcg_cpu_thread; |
618 | 296af7c9 | Blue Swirl | env->halt_cond = tcg_halt_cond; |
619 | 296af7c9 | Blue Swirl | } |
620 | 296af7c9 | Blue Swirl | } |
621 | 296af7c9 | Blue Swirl | |
622 | 296af7c9 | Blue Swirl | static void kvm_start_vcpu(CPUState *env) |
623 | 296af7c9 | Blue Swirl | { |
624 | 296af7c9 | Blue Swirl | env->thread = qemu_mallocz(sizeof(QemuThread));
|
625 | 296af7c9 | Blue Swirl | env->halt_cond = qemu_mallocz(sizeof(QemuCond));
|
626 | 296af7c9 | Blue Swirl | qemu_cond_init(env->halt_cond); |
627 | 296af7c9 | Blue Swirl | qemu_thread_create(env->thread, kvm_cpu_thread_fn, env); |
628 | 296af7c9 | Blue Swirl | while (env->created == 0) |
629 | 296af7c9 | Blue Swirl | qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
|
630 | 296af7c9 | Blue Swirl | } |
631 | 296af7c9 | Blue Swirl | |
632 | 296af7c9 | Blue Swirl | void qemu_init_vcpu(void *_env) |
633 | 296af7c9 | Blue Swirl | { |
634 | 296af7c9 | Blue Swirl | CPUState *env = _env; |
635 | 296af7c9 | Blue Swirl | |
636 | 296af7c9 | Blue Swirl | env->nr_cores = smp_cores; |
637 | 296af7c9 | Blue Swirl | env->nr_threads = smp_threads; |
638 | 296af7c9 | Blue Swirl | if (kvm_enabled())
|
639 | 296af7c9 | Blue Swirl | kvm_start_vcpu(env); |
640 | 296af7c9 | Blue Swirl | else
|
641 | 296af7c9 | Blue Swirl | tcg_init_vcpu(env); |
642 | 296af7c9 | Blue Swirl | } |
643 | 296af7c9 | Blue Swirl | |
/*
 * Wake the main loop: delegates to qemu_event_increment(), which bumps
 * the I/O-thread event counter so pending state changes get noticed.
 */
void qemu_notify_event(void)
{
    qemu_event_increment();
}
648 | 296af7c9 | Blue Swirl | |
/*
 * Record a pending VM-stop reason and wake the main loop so the
 * request is picked up there (the stop itself is not performed here).
 */
static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}
654 | 296af7c9 | Blue Swirl | |
655 | 296af7c9 | Blue Swirl | void vm_stop(int reason) |
656 | 296af7c9 | Blue Swirl | { |
657 | 296af7c9 | Blue Swirl | QemuThread me; |
658 | 296af7c9 | Blue Swirl | qemu_thread_self(&me); |
659 | 296af7c9 | Blue Swirl | |
660 | 296af7c9 | Blue Swirl | if (!qemu_thread_equal(&me, &io_thread)) {
|
661 | 296af7c9 | Blue Swirl | qemu_system_vmstop_request(reason); |
662 | 296af7c9 | Blue Swirl | /*
|
663 | 296af7c9 | Blue Swirl | * FIXME: should not return to device code in case
|
664 | 296af7c9 | Blue Swirl | * vm_stop() has been requested.
|
665 | 296af7c9 | Blue Swirl | */
|
666 | 296af7c9 | Blue Swirl | if (cpu_single_env) {
|
667 | 296af7c9 | Blue Swirl | cpu_exit(cpu_single_env); |
668 | 296af7c9 | Blue Swirl | cpu_single_env->stop = 1;
|
669 | 296af7c9 | Blue Swirl | } |
670 | 296af7c9 | Blue Swirl | return;
|
671 | 296af7c9 | Blue Swirl | } |
672 | 296af7c9 | Blue Swirl | do_vm_stop(reason); |
673 | 296af7c9 | Blue Swirl | } |
674 | 296af7c9 | Blue Swirl | |
675 | 296af7c9 | Blue Swirl | #endif
|
676 | 296af7c9 | Blue Swirl | |
/*
 * Execute guest code on one vCPU via cpu_exec(), maintaining the
 * icount budget around the call when instruction counting is enabled.
 * Returns cpu_exec()'s result (e.g. an EXCP_* exit reason).
 */
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        /* Retire whatever budget was left over from the previous run
           before computing a fresh one. */
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        /* Budget instructions up to the next timer deadline. */
        count = qemu_icount_round (qemu_next_deadline());
        qemu_icount += count;
        /* The low decrement field is 16 bits wide; overflow of the
           budget spills into icount_extra. */
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        /* Clear the whole u32, wiping both the low count and the
           high (interrupt-request) half in one store. */
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
714 | 296af7c9 | Blue Swirl | |
715 | 296af7c9 | Blue Swirl | bool tcg_cpu_exec(void) |
716 | 296af7c9 | Blue Swirl | { |
717 | 296af7c9 | Blue Swirl | int ret = 0; |
718 | 296af7c9 | Blue Swirl | |
719 | 296af7c9 | Blue Swirl | if (next_cpu == NULL) |
720 | 296af7c9 | Blue Swirl | next_cpu = first_cpu; |
721 | 296af7c9 | Blue Swirl | for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) { |
722 | 296af7c9 | Blue Swirl | CPUState *env = cur_cpu = next_cpu; |
723 | 296af7c9 | Blue Swirl | |
724 | 296af7c9 | Blue Swirl | qemu_clock_enable(vm_clock, |
725 | 296af7c9 | Blue Swirl | (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
|
726 | 296af7c9 | Blue Swirl | |
727 | 296af7c9 | Blue Swirl | if (qemu_alarm_pending())
|
728 | 296af7c9 | Blue Swirl | break;
|
729 | 296af7c9 | Blue Swirl | if (cpu_can_run(env))
|
730 | 296af7c9 | Blue Swirl | ret = qemu_cpu_exec(env); |
731 | 296af7c9 | Blue Swirl | else if (env->stop) |
732 | 296af7c9 | Blue Swirl | break;
|
733 | 296af7c9 | Blue Swirl | |
734 | 296af7c9 | Blue Swirl | if (ret == EXCP_DEBUG) {
|
735 | 296af7c9 | Blue Swirl | gdb_set_stop_cpu(env); |
736 | 296af7c9 | Blue Swirl | debug_requested = EXCP_DEBUG; |
737 | 296af7c9 | Blue Swirl | break;
|
738 | 296af7c9 | Blue Swirl | } |
739 | 296af7c9 | Blue Swirl | } |
740 | 296af7c9 | Blue Swirl | return tcg_has_work();
|
741 | 296af7c9 | Blue Swirl | } |
742 | 296af7c9 | Blue Swirl | |
743 | 296af7c9 | Blue Swirl | void set_numa_modes(void) |
744 | 296af7c9 | Blue Swirl | { |
745 | 296af7c9 | Blue Swirl | CPUState *env; |
746 | 296af7c9 | Blue Swirl | int i;
|
747 | 296af7c9 | Blue Swirl | |
748 | 296af7c9 | Blue Swirl | for (env = first_cpu; env != NULL; env = env->next_cpu) { |
749 | 296af7c9 | Blue Swirl | for (i = 0; i < nb_numa_nodes; i++) { |
750 | 296af7c9 | Blue Swirl | if (node_cpumask[i] & (1 << env->cpu_index)) { |
751 | 296af7c9 | Blue Swirl | env->numa_node = i; |
752 | 296af7c9 | Blue Swirl | } |
753 | 296af7c9 | Blue Swirl | } |
754 | 296af7c9 | Blue Swirl | } |
755 | 296af7c9 | Blue Swirl | } |
756 | 296af7c9 | Blue Swirl | |
757 | 296af7c9 | Blue Swirl | void set_cpu_log(const char *optarg) |
758 | 296af7c9 | Blue Swirl | { |
759 | 296af7c9 | Blue Swirl | int mask;
|
760 | 296af7c9 | Blue Swirl | const CPULogItem *item;
|
761 | 296af7c9 | Blue Swirl | |
762 | 296af7c9 | Blue Swirl | mask = cpu_str_to_log_mask(optarg); |
763 | 296af7c9 | Blue Swirl | if (!mask) {
|
764 | 296af7c9 | Blue Swirl | printf("Log items (comma separated):\n");
|
765 | 296af7c9 | Blue Swirl | for (item = cpu_log_items; item->mask != 0; item++) { |
766 | 296af7c9 | Blue Swirl | printf("%-10s %s\n", item->name, item->help);
|
767 | 296af7c9 | Blue Swirl | } |
768 | 296af7c9 | Blue Swirl | exit(1);
|
769 | 296af7c9 | Blue Swirl | } |
770 | 296af7c9 | Blue Swirl | cpu_set_log(mask); |
771 | 296af7c9 | Blue Swirl | } |
772 | 29e922b6 | Blue Swirl | |
773 | 29e922b6 | Blue Swirl | /* Return the virtual CPU time, based on the instruction counter. */
|
774 | 29e922b6 | Blue Swirl | int64_t cpu_get_icount(void)
|
775 | 29e922b6 | Blue Swirl | { |
776 | 29e922b6 | Blue Swirl | int64_t icount; |
777 | 29e922b6 | Blue Swirl | CPUState *env = cpu_single_env;; |
778 | 29e922b6 | Blue Swirl | |
779 | 29e922b6 | Blue Swirl | icount = qemu_icount; |
780 | 29e922b6 | Blue Swirl | if (env) {
|
781 | 29e922b6 | Blue Swirl | if (!can_do_io(env)) {
|
782 | 29e922b6 | Blue Swirl | fprintf(stderr, "Bad clock read\n");
|
783 | 29e922b6 | Blue Swirl | } |
784 | 29e922b6 | Blue Swirl | icount -= (env->icount_decr.u16.low + env->icount_extra); |
785 | 29e922b6 | Blue Swirl | } |
786 | 29e922b6 | Blue Swirl | return qemu_icount_bias + (icount << icount_time_shift);
|
787 | 29e922b6 | Blue Swirl | } |