Revision c2764719 — linux-user/main.c (diff excerpt)
143 | 143 |
We don't require a full sync, only that no cpus are executing guest code. |
144 | 144 |
The alternative is to map target atomic ops onto host equivalents, |
145 | 145 |
which requires quite a lot of per host/target work. */ |
/* Protects the global CPU list; taken via cpu_list_lock()/cpu_list_unlock()
   when threads are added to or removed from the list. */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Guards entry to and exit from the exclusive section. */
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
/* NOTE(review): presumably waited on under exclusive_lock to signal that
   all CPUs have stopped / may resume — confirm against start_exclusive(). */
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
... | ... | |
165 | 166 |
thread_env->next_cpu = NULL; |
166 | 167 |
pending_cpus = 0; |
167 | 168 |
pthread_mutex_init(&exclusive_lock, NULL); |
169 |
pthread_mutex_init(&cpu_list_mutex, NULL); |
|
168 | 170 |
pthread_cond_init(&exclusive_cond, NULL); |
169 | 171 |
pthread_cond_init(&exclusive_resume, NULL); |
170 | 172 |
pthread_mutex_init(&tb_lock, NULL); |
... | ... | |
237 | 239 |
exclusive_idle(); |
238 | 240 |
pthread_mutex_unlock(&exclusive_lock); |
239 | 241 |
} |
242 |
|
|
243 |
void cpu_list_lock(void) |
|
244 |
{ |
|
245 |
pthread_mutex_lock(&cpu_list_mutex); |
|
246 |
} |
|
247 |
|
|
248 |
void cpu_list_unlock(void) |
|
249 |
{ |
|
250 |
pthread_mutex_unlock(&cpu_list_mutex); |
|
251 |
} |
|
240 | 252 |
#else /* if !USE_NPTL */ |
241 | 253 |
/* These are no-ops because we are not threadsafe. */ |
242 | 254 |
static inline void cpu_exec_start(CPUState *env) |
... | ... | |
265 | 277 |
gdbserver_fork(thread_env); |
266 | 278 |
} |
267 | 279 |
} |
280 |
|
|
281 |
/* No-op: without NPTL the emulator is single-threaded ("not threadsafe"
   per the comment above), so no CPU-list locking is required. */
void cpu_list_lock(void)
{
}
|
284 |
|
|
285 |
/* No-op counterpart of cpu_list_lock() for the non-NPTL build. */
void cpu_list_unlock(void)
{
}
|
268 | 288 |
#endif |
269 | 289 |
|
270 | 290 |
|
Also available in: Unified diff