Revision 14f24e14 linux-user/mmap.c
b/linux-user/mmap.c | ||
---|---|---|
264 | 264 |
return 0; |
265 | 265 |
} |
266 | 266 |
|
267 |
#if defined(__CYGWIN__) |
|
267 |
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64 |
|
268 |
# define TASK_UNMAPPED_BASE (1ul << 38) |
|
269 |
#elif defined(__CYGWIN__) |
|
268 | 270 |
/* Cygwin doesn't have a whole lot of address space. */ |
269 |
static abi_ulong mmap_next_start = 0x18000000;
|
|
271 |
# define TASK_UNMAPPED_BASE 0x18000000
|
|
270 | 272 |
#else |
271 |
static abi_ulong mmap_next_start = 0x40000000;
|
|
273 |
# define TASK_UNMAPPED_BASE 0x40000000
|
|
272 | 274 |
#endif |
275 |
static abi_ulong mmap_next_start = TASK_UNMAPPED_BASE; |
|
273 | 276 |
|
274 | 277 |
unsigned long last_brk; |
275 | 278 |
|
... | ... | |
281 | 284 |
*/ |
282 | 285 |
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size) |
283 | 286 |
{ |
284 |
void *ptr; |
|
287 |
void *ptr, *prev;
|
|
285 | 288 |
abi_ulong addr; |
286 |
|
|
287 |
size = HOST_PAGE_ALIGN(size); |
|
288 |
start &= qemu_host_page_mask; |
|
289 |
int wrapped, repeat; |
|
289 | 290 |
|
290 | 291 |
/* If 'start' == 0, then a default start address is used. */ |
291 |
if (start == 0) |
|
292 |
if (start == 0) {
|
|
292 | 293 |
start = mmap_next_start; |
294 |
} else { |
|
295 |
start &= qemu_host_page_mask; |
|
296 |
} |
|
297 |
|
|
298 |
size = HOST_PAGE_ALIGN(size); |
|
293 | 299 |
|
294 | 300 |
addr = start; |
301 |
wrapped = repeat = 0; |
|
302 |
prev = 0; |
|
295 | 303 |
|
296 |
for(;;) {
|
|
304 |
for (;; prev = ptr) {
|
|
297 | 305 |
/* |
298 | 306 |
* Reserve needed memory area to avoid a race. |
299 | 307 |
* It should be discarded using: |
... | ... | |
301 | 309 |
* - mremap() with MREMAP_FIXED flag |
302 | 310 |
* - shmat() with SHM_REMAP flag |
303 | 311 |
*/ |
304 |
ptr = mmap((void *)(unsigned long)addr, size, PROT_NONE,
|
|
312 |
ptr = mmap(g2h(addr), size, PROT_NONE,
|
|
305 | 313 |
MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0); |
306 | 314 |
|
307 | 315 |
/* ENOMEM, if host address space has no memory */ |
308 |
if (ptr == MAP_FAILED) |
|
316 |
if (ptr == MAP_FAILED) {
|
|
309 | 317 |
return (abi_ulong)-1; |
318 |
} |
|
310 | 319 |
|
311 |
/* If address fits target address space we've found what we need */
|
|
312 |
if ((unsigned long)ptr + size - 1 <= (abi_ulong)-1)
|
|
313 |
break;
|
|
320 |
/* Count the number of sequential returns of the same address.
|
|
321 |
This is used to modify the search algorithm below. */
|
|
322 |
repeat = (ptr == prev ? repeat + 1 : 0);
|
|
314 | 323 |
|
315 |
/* Unmap and try again with new page */ |
|
324 |
if (h2g_valid(ptr + size - 1)) { |
|
325 |
addr = h2g(ptr); |
|
326 |
|
|
327 |
if ((addr & ~TARGET_PAGE_MASK) == 0) { |
|
328 |
/* Success. */ |
|
329 |
if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) { |
|
330 |
mmap_next_start = addr + size; |
|
331 |
} |
|
332 |
return addr; |
|
333 |
} |
|
334 |
|
|
335 |
/* The address is not properly aligned for the target. */ |
|
336 |
switch (repeat) { |
|
337 |
case 0: |
|
338 |
/* Assume the result that the kernel gave us is the |
|
339 |
first with enough free space, so start again at the |
|
340 |
next higher target page. */ |
|
341 |
addr = TARGET_PAGE_ALIGN(addr); |
|
342 |
break; |
|
343 |
case 1: |
|
344 |
/* Sometimes the kernel decides to perform the allocation |
|
345 |
at the top end of memory instead. */ |
|
346 |
addr &= TARGET_PAGE_MASK; |
|
347 |
break; |
|
348 |
case 2: |
|
349 |
/* Start over at low memory. */ |
|
350 |
addr = 0; |
|
351 |
break; |
|
352 |
default: |
|
353 |
/* Fail. This unaligned block must be the last. */ |
|
354 |
addr = -1; |
|
355 |
break; |
|
356 |
} |
|
357 |
} else { |
|
358 |
/* Since the result the kernel gave didn't fit, start |
|
359 |
again at low memory. If any repetition, fail. */ |
|
360 |
addr = (repeat ? -1 : 0); |
|
361 |
} |
|
362 |
|
|
363 |
/* Unmap and try again. */ |
|
316 | 364 |
munmap(ptr, size); |
317 |
addr += qemu_host_page_size; |
|
318 | 365 |
|
319 |
/* ENOMEM if we check whole of target address space */
|
|
320 |
if (addr == start)
|
|
366 |
/* ENOMEM if we checked the whole of the target address space. */
|
|
367 |
if (addr == -1ul) {
|
|
321 | 368 |
return (abi_ulong)-1; |
369 |
} else if (addr == 0) { |
|
370 |
if (wrapped) { |
|
371 |
return (abi_ulong)-1; |
|
372 |
} |
|
373 |
wrapped = 1; |
|
374 |
/* Don't actually use 0 when wrapping, instead indicate |
|
375 |
that we'd truly like an allocation in low memory. */ |
|
376 |
addr = (mmap_min_addr > TARGET_PAGE_SIZE |
|
377 |
? TARGET_PAGE_ALIGN(mmap_min_addr) |
|
378 |
: TARGET_PAGE_SIZE); |
|
379 |
} else if (wrapped && addr >= start) { |
|
380 |
return (abi_ulong)-1; |
|
381 |
} |
|
322 | 382 |
} |
323 |
|
|
324 |
/* Update default start address */ |
|
325 |
if (start == mmap_next_start) |
|
326 |
mmap_next_start = (unsigned long)ptr + size; |
|
327 |
|
|
328 |
return h2g(ptr); |
|
329 | 383 |
} |
330 | 384 |
|
331 | 385 |
/* NOTE: all the constants are the HOST ones */ |
Also available in: Unified diff