root / qemu-thread-win32.c @ 4e339882
History | View | Annotate | Download (8.8 kB)
1 | 9257d46d | Paolo Bonzini | /*
|
---|---|---|---|
2 | 9257d46d | Paolo Bonzini | * Win32 implementation for mutex/cond/thread functions
|
3 | 9257d46d | Paolo Bonzini | *
|
4 | 9257d46d | Paolo Bonzini | * Copyright Red Hat, Inc. 2010
|
5 | 9257d46d | Paolo Bonzini | *
|
6 | 9257d46d | Paolo Bonzini | * Author:
|
7 | 9257d46d | Paolo Bonzini | * Paolo Bonzini <pbonzini@redhat.com>
|
8 | 9257d46d | Paolo Bonzini | *
|
9 | 9257d46d | Paolo Bonzini | * This work is licensed under the terms of the GNU GPL, version 2 or later.
|
10 | 9257d46d | Paolo Bonzini | * See the COPYING file in the top-level directory.
|
11 | 9257d46d | Paolo Bonzini | *
|
12 | 9257d46d | Paolo Bonzini | */
|
13 | 9257d46d | Paolo Bonzini | #include "qemu-common.h" |
14 | 9257d46d | Paolo Bonzini | #include "qemu-thread.h" |
15 | 9257d46d | Paolo Bonzini | #include <process.h> |
16 | 9257d46d | Paolo Bonzini | #include <assert.h> |
17 | 9257d46d | Paolo Bonzini | #include <limits.h> |
18 | 9257d46d | Paolo Bonzini | |
19 | 9257d46d | Paolo Bonzini | static void error_exit(int err, const char *msg) |
20 | 9257d46d | Paolo Bonzini | { |
21 | 9257d46d | Paolo Bonzini | char *pstr;
|
22 | 9257d46d | Paolo Bonzini | |
23 | 9257d46d | Paolo Bonzini | FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER, |
24 | 9257d46d | Paolo Bonzini | NULL, err, 0, (LPTSTR)&pstr, 2, NULL); |
25 | 9257d46d | Paolo Bonzini | fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
|
26 | 9257d46d | Paolo Bonzini | LocalFree(pstr); |
27 | 53380ac3 | Jan Kiszka | abort(); |
28 | 9257d46d | Paolo Bonzini | } |
29 | 9257d46d | Paolo Bonzini | |
30 | 9257d46d | Paolo Bonzini | void qemu_mutex_init(QemuMutex *mutex)
|
31 | 9257d46d | Paolo Bonzini | { |
32 | 9257d46d | Paolo Bonzini | mutex->owner = 0;
|
33 | 9257d46d | Paolo Bonzini | InitializeCriticalSection(&mutex->lock); |
34 | 9257d46d | Paolo Bonzini | } |
35 | 9257d46d | Paolo Bonzini | |
36 | 1a290aea | Stefan Weil | void qemu_mutex_destroy(QemuMutex *mutex)
|
37 | 1a290aea | Stefan Weil | { |
38 | 1a290aea | Stefan Weil | assert(mutex->owner == 0);
|
39 | 1a290aea | Stefan Weil | DeleteCriticalSection(&mutex->lock); |
40 | 1a290aea | Stefan Weil | } |
41 | 1a290aea | Stefan Weil | |
42 | 9257d46d | Paolo Bonzini | void qemu_mutex_lock(QemuMutex *mutex)
|
43 | 9257d46d | Paolo Bonzini | { |
44 | 9257d46d | Paolo Bonzini | EnterCriticalSection(&mutex->lock); |
45 | 9257d46d | Paolo Bonzini | |
46 | 9257d46d | Paolo Bonzini | /* Win32 CRITICAL_SECTIONs are recursive. Assert that we're not
|
47 | 9257d46d | Paolo Bonzini | * using them as such.
|
48 | 9257d46d | Paolo Bonzini | */
|
49 | 9257d46d | Paolo Bonzini | assert(mutex->owner == 0);
|
50 | 9257d46d | Paolo Bonzini | mutex->owner = GetCurrentThreadId(); |
51 | 9257d46d | Paolo Bonzini | } |
52 | 9257d46d | Paolo Bonzini | |
53 | 9257d46d | Paolo Bonzini | int qemu_mutex_trylock(QemuMutex *mutex)
|
54 | 9257d46d | Paolo Bonzini | { |
55 | 9257d46d | Paolo Bonzini | int owned;
|
56 | 9257d46d | Paolo Bonzini | |
57 | 9257d46d | Paolo Bonzini | owned = TryEnterCriticalSection(&mutex->lock); |
58 | 9257d46d | Paolo Bonzini | if (owned) {
|
59 | 9257d46d | Paolo Bonzini | assert(mutex->owner == 0);
|
60 | 9257d46d | Paolo Bonzini | mutex->owner = GetCurrentThreadId(); |
61 | 9257d46d | Paolo Bonzini | } |
62 | 9257d46d | Paolo Bonzini | return !owned;
|
63 | 9257d46d | Paolo Bonzini | } |
64 | 9257d46d | Paolo Bonzini | |
65 | 9257d46d | Paolo Bonzini | void qemu_mutex_unlock(QemuMutex *mutex)
|
66 | 9257d46d | Paolo Bonzini | { |
67 | 9257d46d | Paolo Bonzini | assert(mutex->owner == GetCurrentThreadId()); |
68 | 9257d46d | Paolo Bonzini | mutex->owner = 0;
|
69 | 9257d46d | Paolo Bonzini | LeaveCriticalSection(&mutex->lock); |
70 | 9257d46d | Paolo Bonzini | } |
71 | 9257d46d | Paolo Bonzini | |
72 | 9257d46d | Paolo Bonzini | void qemu_cond_init(QemuCond *cond)
|
73 | 9257d46d | Paolo Bonzini | { |
74 | 9257d46d | Paolo Bonzini | memset(cond, 0, sizeof(*cond)); |
75 | 9257d46d | Paolo Bonzini | |
76 | 9257d46d | Paolo Bonzini | cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL); |
77 | 9257d46d | Paolo Bonzini | if (!cond->sema) {
|
78 | 9257d46d | Paolo Bonzini | error_exit(GetLastError(), __func__); |
79 | 9257d46d | Paolo Bonzini | } |
80 | 9257d46d | Paolo Bonzini | cond->continue_event = CreateEvent(NULL, /* security */ |
81 | 9257d46d | Paolo Bonzini | FALSE, /* auto-reset */
|
82 | 9257d46d | Paolo Bonzini | FALSE, /* not signaled */
|
83 | 9257d46d | Paolo Bonzini | NULL); /* name */ |
84 | 9257d46d | Paolo Bonzini | if (!cond->continue_event) {
|
85 | 9257d46d | Paolo Bonzini | error_exit(GetLastError(), __func__); |
86 | 9257d46d | Paolo Bonzini | } |
87 | 9257d46d | Paolo Bonzini | } |
88 | 9257d46d | Paolo Bonzini | |
89 | 1a290aea | Stefan Weil | void qemu_cond_destroy(QemuCond *cond)
|
90 | 1a290aea | Stefan Weil | { |
91 | 1a290aea | Stefan Weil | BOOL result; |
92 | 1a290aea | Stefan Weil | result = CloseHandle(cond->continue_event); |
93 | 1a290aea | Stefan Weil | if (!result) {
|
94 | 1a290aea | Stefan Weil | error_exit(GetLastError(), __func__); |
95 | 1a290aea | Stefan Weil | } |
96 | 1a290aea | Stefan Weil | cond->continue_event = 0;
|
97 | 1a290aea | Stefan Weil | result = CloseHandle(cond->sema); |
98 | 1a290aea | Stefan Weil | if (!result) {
|
99 | 1a290aea | Stefan Weil | error_exit(GetLastError(), __func__); |
100 | 1a290aea | Stefan Weil | } |
101 | 1a290aea | Stefan Weil | cond->sema = 0;
|
102 | 1a290aea | Stefan Weil | } |
103 | 1a290aea | Stefan Weil | |
/*
 * Wake one waiter of @cond.  Must be called with the external mutex
 * held, as with pthread_cond_signal.
 */
void qemu_cond_signal(QemuCond *cond)
{
    DWORD result;

    /*
     * Signal only when there are waiters.  cond->waiters is
     * incremented by pthread_cond_wait under the external lock,
     * so we are safe about that.
     */
    if (cond->waiters == 0) {
        return;
    }

    /*
     * Waiting threads decrement it outside the external lock, but
     * only if another thread is executing pthread_cond_broadcast and
     * has the mutex.  So, it also cannot be decremented concurrently
     * with this particular access.
     */
    cond->target = cond->waiters - 1;
    /* Atomically release one slice of the semaphore and block until the
     * woken waiter decrements waiters down to target and signals
     * continue_event (see qemu_cond_wait).
     */
    result = SignalObjectAndWait(cond->sema, cond->continue_event,
                                 INFINITE, FALSE);
    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
        error_exit(GetLastError(), __func__);
    }
}
130 | 9257d46d | Paolo Bonzini | |
131 | 9257d46d | Paolo Bonzini | void qemu_cond_broadcast(QemuCond *cond)
|
132 | 9257d46d | Paolo Bonzini | { |
133 | 9257d46d | Paolo Bonzini | BOOLEAN result; |
134 | 9257d46d | Paolo Bonzini | /*
|
135 | 9257d46d | Paolo Bonzini | * As in pthread_cond_signal, access to cond->waiters and
|
136 | 9257d46d | Paolo Bonzini | * cond->target is locked via the external mutex.
|
137 | 9257d46d | Paolo Bonzini | */
|
138 | 9257d46d | Paolo Bonzini | if (cond->waiters == 0) { |
139 | 9257d46d | Paolo Bonzini | return;
|
140 | 9257d46d | Paolo Bonzini | } |
141 | 9257d46d | Paolo Bonzini | |
142 | 9257d46d | Paolo Bonzini | cond->target = 0;
|
143 | 9257d46d | Paolo Bonzini | result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
|
144 | 9257d46d | Paolo Bonzini | if (!result) {
|
145 | 9257d46d | Paolo Bonzini | error_exit(GetLastError(), __func__); |
146 | 9257d46d | Paolo Bonzini | } |
147 | 9257d46d | Paolo Bonzini | |
148 | 9257d46d | Paolo Bonzini | /*
|
149 | 9257d46d | Paolo Bonzini | * At this point all waiters continue. Each one takes its
|
150 | 9257d46d | Paolo Bonzini | * slice of the semaphore. Now it's our turn to wait: Since
|
151 | 9257d46d | Paolo Bonzini | * the external mutex is held, no thread can leave cond_wait,
|
152 | 9257d46d | Paolo Bonzini | * yet. For this reason, we can be sure that no thread gets
|
153 | 9257d46d | Paolo Bonzini | * a chance to eat *more* than one slice. OTOH, it means
|
154 | 9257d46d | Paolo Bonzini | * that the last waiter must send us a wake-up.
|
155 | 9257d46d | Paolo Bonzini | */
|
156 | 9257d46d | Paolo Bonzini | WaitForSingleObject(cond->continue_event, INFINITE); |
157 | 9257d46d | Paolo Bonzini | } |
158 | 9257d46d | Paolo Bonzini | |
/*
 * Release @mutex, wait for @cond to be signaled/broadcast, then
 * re-acquire @mutex before returning (pthread_cond_wait semantics).
 */
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendez-vous with the signaling thread and
     * let it continue.  For cond_broadcast this has heavy contention
     * and triggers thundering herd.  So goes life.
     *
     * Decrease waiters count.  The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it.  To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    /* cond->target was set by signal (waiters - 1) or broadcast (0);
     * whichever waiter hits it is the last one out and releases the
     * signaling thread blocked on continue_event.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        SetEvent(cond->continue_event);
    }

    qemu_mutex_lock(mutex);
}
194 | 9257d46d | Paolo Bonzini | |
/* Per-thread bookkeeping allocated by qemu_thread_create and handed to
 * win32_start_routine.  For detached threads it is freed by the thread
 * itself at startup; for joinable threads it lives until qemu_thread_join.
 */
struct QemuThreadData {
    /* Passed to win32_start_routine. */
    void *(*start_routine)(void *);   /* thread entry point */
    void *arg;                        /* opaque argument for start_routine */
    short mode;                       /* QEMU_THREAD_DETACHED or joinable */

    /* Only used for joinable threads. */
    bool exited;                      /* set under cs when the thread finishes */
    void *ret;                        /* value recorded by qemu_thread_exit */
    CRITICAL_SECTION cs;              /* orders exited vs. qemu_thread_get_handle */
};

/* TLS slot holding the current thread's QemuThreadData (NULL for
 * detached threads); lazily allocated by qemu_thread_init.
 */
static int qemu_thread_tls_index = TLS_OUT_OF_INDEXES;
208 | 9257d46d | Paolo Bonzini | |
/* Thread trampoline handed to _beginthreadex: adapts the Win32
 * "unsigned __stdcall" signature to the pthread-style start routine.
 */
static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    /* A detached thread is never joined, so its data is not needed once
     * start_routine/arg have been copied out above; free it now.
     */
    if (data->mode == QEMU_THREAD_DETACHED) {
        g_free(data);
        data = NULL;
    }
    /* Publish data (or NULL) so qemu_thread_exit/get_self can find it. */
    TlsSetValue(qemu_thread_tls_index, data);
    qemu_thread_exit(start_routine(thread_arg));
    abort();
}
223 | 9257d46d | Paolo Bonzini | |
/* Terminate the calling thread, recording @arg as the value that
 * qemu_thread_join will return.  Never returns.
 */
void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = TlsGetValue(qemu_thread_tls_index);

    /* data is NULL for detached threads (freed in win32_start_routine);
     * there is nothing to record for those.
     */
    if (data) {
        assert(data->mode != QEMU_THREAD_DETACHED);
        data->ret = arg;
        /* Setting exited under cs synchronizes with
         * qemu_thread_get_handle: once exited is set, no further handle
         * to this thread will be opened.
         */
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    }
    _endthreadex(0);
}
236 | 403e6331 | Paolo Bonzini | |
/*
 * Wait for @thread to finish and return the value it passed to
 * qemu_thread_exit.  Returns NULL immediately for detached threads
 * (thread->data == NULL).  Frees the thread's bookkeeping data.
 */
void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (!data) {
        /* Detached thread: nothing to join. */
        return NULL;
    }
    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there.  The simplest, non racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    /* A NULL handle means the thread already exited; either way
     * data->ret is valid here, since qemu_thread_exit stores it before
     * setting exited.
     */
    ret = data->ret;
    assert(data->mode != QEMU_THREAD_DETACHED);
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}
265 | 9257d46d | Paolo Bonzini | |
266 | 9257d46d | Paolo Bonzini | static inline void qemu_thread_init(void) |
267 | 9257d46d | Paolo Bonzini | { |
268 | 9257d46d | Paolo Bonzini | if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
|
269 | 9257d46d | Paolo Bonzini | qemu_thread_tls_index = TlsAlloc(); |
270 | 9257d46d | Paolo Bonzini | if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
|
271 | 9257d46d | Paolo Bonzini | error_exit(ERROR_NO_SYSTEM_RESOURCES, __func__); |
272 | 9257d46d | Paolo Bonzini | } |
273 | 9257d46d | Paolo Bonzini | } |
274 | 9257d46d | Paolo Bonzini | } |
275 | 9257d46d | Paolo Bonzini | |
276 | 9257d46d | Paolo Bonzini | |
277 | 9257d46d | Paolo Bonzini | void qemu_thread_create(QemuThread *thread,
|
278 | 9257d46d | Paolo Bonzini | void *(*start_routine)(void *), |
279 | cf218714 | Jan Kiszka | void *arg, int mode) |
280 | 9257d46d | Paolo Bonzini | { |
281 | 9257d46d | Paolo Bonzini | HANDLE hThread; |
282 | 9257d46d | Paolo Bonzini | |
283 | 9257d46d | Paolo Bonzini | struct QemuThreadData *data;
|
284 | 9257d46d | Paolo Bonzini | qemu_thread_init(); |
285 | 7267c094 | Anthony Liguori | data = g_malloc(sizeof *data);
|
286 | 9257d46d | Paolo Bonzini | data->start_routine = start_routine; |
287 | 9257d46d | Paolo Bonzini | data->arg = arg; |
288 | 403e6331 | Paolo Bonzini | data->mode = mode; |
289 | 403e6331 | Paolo Bonzini | data->exited = false;
|
290 | 9257d46d | Paolo Bonzini | |
291 | edc1de97 | Stefan Weil | if (data->mode != QEMU_THREAD_DETACHED) {
|
292 | edc1de97 | Stefan Weil | InitializeCriticalSection(&data->cs); |
293 | edc1de97 | Stefan Weil | } |
294 | edc1de97 | Stefan Weil | |
295 | 9257d46d | Paolo Bonzini | hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine, |
296 | 403e6331 | Paolo Bonzini | data, 0, &thread->tid);
|
297 | 9257d46d | Paolo Bonzini | if (!hThread) {
|
298 | 9257d46d | Paolo Bonzini | error_exit(GetLastError(), __func__); |
299 | 9257d46d | Paolo Bonzini | } |
300 | 9257d46d | Paolo Bonzini | CloseHandle(hThread); |
301 | 403e6331 | Paolo Bonzini | thread->data = (mode == QEMU_THREAD_DETACHED) ? NULL : data;
|
302 | 9257d46d | Paolo Bonzini | } |
303 | 9257d46d | Paolo Bonzini | |
304 | 9257d46d | Paolo Bonzini | void qemu_thread_get_self(QemuThread *thread)
|
305 | 9257d46d | Paolo Bonzini | { |
306 | 403e6331 | Paolo Bonzini | qemu_thread_init(); |
307 | 403e6331 | Paolo Bonzini | thread->data = TlsGetValue(qemu_thread_tls_index); |
308 | 403e6331 | Paolo Bonzini | thread->tid = GetCurrentThreadId(); |
309 | 9257d46d | Paolo Bonzini | } |
310 | 9257d46d | Paolo Bonzini | |
/*
 * Open and return a handle for @thread with SYNCHRONIZE and
 * THREAD_SUSPEND_RESUME access, or NULL if the thread is detached or
 * has already exited.  The caller must CloseHandle() the result.
 */
HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (!data) {
        return NULL;
    }

    assert(data->mode != QEMU_THREAD_DETACHED);
    /* Holding cs keeps "exited" stable: the target thread sets it under
     * the same cs before terminating, so OpenThread is only attempted
     * while the thread is still running.
     */
    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME, FALSE,
                            thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}
332 | 1ecf47bf | Paolo Bonzini | |
333 | 9257d46d | Paolo Bonzini | int qemu_thread_is_self(QemuThread *thread)
|
334 | 9257d46d | Paolo Bonzini | { |
335 | 403e6331 | Paolo Bonzini | return GetCurrentThreadId() == thread->tid;
|
336 | 9257d46d | Paolo Bonzini | } |