Revision 2e9a5713

b/cpu-all.h
 /* original state of the write flag (used when tracking self-modifying
    code */
 #define PAGE_WRITE_ORG 0x0010
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
+/* FIXME: Code that sets/uses this is broken and needs to go away.  */
 #define PAGE_RESERVED  0x0020
+#endif
 
 #if defined(CONFIG_USER_ONLY)
 void page_dump(FILE *f);
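
Note: after this hunk PAGE_RESERVED only exists in BSD user-mode builds, so any remaining user of the flag has to compile its check out when the macro is absent. A minimal sketch of such a guard; the helper and its flags argument are hypothetical, only the bit value and the #if condition come from the hunk above:

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#define PAGE_RESERVED  0x0020
#endif

/* Hypothetical helper: report whether a page-flag word marks a reserved
   page; on builds where the flag does not exist, nothing is reserved. */
static int page_is_reserved(int flags)
{
#ifdef PAGE_RESERVED
    return (flags & PAGE_RESERVED) != 0;
#else
    return 0;
#endif
}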
b/exec.c
         qemu_host_page_bits++;
     qemu_host_page_mask = ~(qemu_host_page_size - 1);
 
-#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
     {
 #ifdef HAVE_KINFO_GETVMMAP
         struct kinfo_vmentry *freep;
......
 
         last_brk = (unsigned long)sbrk(0);
 
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
         f = fopen("/compat/linux/proc/self/maps", "r");
-#else
-        f = fopen("/proc/self/maps", "r");
-#endif
         if (f) {
             mmap_lock();
 
......
     int i;
 
 #if defined(CONFIG_USER_ONLY)
-    /* We can't use qemu_malloc because it may recurse into a locked mutex.
-       Neither can we record the new pages we reserve while allocating a
-       given page because that may recurse into an unallocated page table
-       entry.  Stuff the allocations we do make into a queue and process
-       them after having completed one entire page table allocation.  */
-
-    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
-    int reserve_idx = 0;
-
+    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
 # define ALLOC(P, SIZE)                                 \
     do {                                                \
         P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
-        if (h2g_valid(P)) {                             \
-            reserve[reserve_idx] = h2g(P);              \
-            reserve[reserve_idx + 1] = SIZE;            \
-            reserve_idx += 2;                           \
-        }                                               \
     } while (0)
 #else
 # define ALLOC(P, SIZE) \
......
     }
 
 #undef ALLOC
-#if defined(CONFIG_USER_ONLY)
-    for (i = 0; i < reserve_idx; i += 2) {
-        unsigned long addr = reserve[i];
-        unsigned long len = reserve[i + 1];
-
-        page_set_flags(addr & TARGET_PAGE_MASK,
-                       TARGET_PAGE_ALIGN(addr + len),
-                       PAGE_RESERVED);
-    }
-#endif
 
     return pd + (index & (L2_SIZE - 1));
 }
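
Note: page_find_alloc() keeps the mmap()-based ALLOC path, since qemu_malloc() may recurse into a locked mutex; only the reserve[] queue and the page_set_flags(..., PAGE_RESERVED) replay are dropped. A self-contained sketch of that allocation pattern, with an illustrative page size rather than QEMU's table sizes:

/* Sketch: grab zero-filled memory straight from mmap() instead of malloc(),
   mirroring the user-only ALLOC macro after this change (POSIX only). */
#include <stdio.h>
#include <sys/mman.h>

#define ALLOC(P, SIZE)                                  \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)

int main(void)
{
    void *node;

    ALLOC(node, 4096);                  /* one page for a table node */
    if (node == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    /* MAP_ANONYMOUS memory is already zeroed, so it can be used as-is. */
    munmap(node, 4096);
    return 0;
}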
b/linux-user/elfload.c
 {
     struct mm_struct *mm = (struct mm_struct *)priv;
 
-    /*
-     * Don't dump anything that qemu has reserved for internal use.
-     */
-    if (flags & PAGE_RESERVED)
-        return (0);
-
     vma_add_mapping(mm, start, end, flags);
     return (0);
 }
b/linux-user/mmap.c
     /* Use map and mark the pages as used.  */
     p = mmap(NULL, size, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-
-    if (h2g_valid(p)) {
-        /* Allocated region overlaps guest address space. This may recurse.  */
-        abi_ulong addr = h2g(p);
-        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
-                       PAGE_RESERVED);
-    }
-
     mmap_unlock();
     return p;
 }
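
Note: the deleted block in qemu_vmalloc() rounded the freshly mapped region to guest page boundaries before flagging it: the start is rounded down with TARGET_PAGE_MASK and the end rounded up with TARGET_PAGE_ALIGN. A standalone sketch of that arithmetic; the macro definitions follow the usual QEMU pattern but are repeated here so the example compiles on its own, and the addresses are made up:

#include <stdio.h>

#define TARGET_PAGE_SIZE        4096UL
#define TARGET_PAGE_MASK        (~(TARGET_PAGE_SIZE - 1))
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

int main(void)
{
    unsigned long addr = 0x12345;   /* hypothetical guest address of p */
    unsigned long size = 0x3000;    /* hypothetical allocation size */

    /* Same rounding the removed page_set_flags() call performed. */
    printf("first page: 0x%lx\n", addr & TARGET_PAGE_MASK);           /* 0x12000 */
    printf("end bound:  0x%lx\n", TARGET_PAGE_ALIGN(addr + size));    /* 0x16000 */
    return 0;
}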
......
         }
         start = h2g(host_start);
     } else {
-        int flg;
-        target_ulong addr;
-
         if (start & ~TARGET_PAGE_MASK) {
             errno = EINVAL;
             goto fail;
......
             goto fail;
         }
 
-        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
-            flg = page_get_flags(addr);
-            if (flg & PAGE_RESERVED) {
-                errno = ENXIO;
-                goto fail;
-            }
-        }
-
         /* worst case: we cannot map the file because the offset is not
            aligned, so we read it */
         if (!(flags & MAP_ANONYMOUS) &&
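
Note: the deleted loop in target_mmap() stepped through the requested range one guest page at a time and failed with ENXIO if any page carried PAGE_RESERVED. A minimal model of that scan; a flat array stands in for QEMU's real page tables, and only the PAGE_RESERVED bit value comes from the patch:

#include <errno.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096UL
#define PAGE_RESERVED    0x0020
#define NB_PAGES         16

static int page_flags[NB_PAGES];        /* index = addr / TARGET_PAGE_SIZE */

static int get_flags(unsigned long addr)
{
    return page_flags[(addr / TARGET_PAGE_SIZE) % NB_PAGES];
}

int main(void)
{
    unsigned long real_start = 0, real_end = 4 * TARGET_PAGE_SIZE, addr;

    page_flags[2] |= PAGE_RESERVED;     /* pretend page 2 was reserved */

    for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        if (get_flags(addr) & PAGE_RESERVED) {
            errno = ENXIO;              /* same failure the old code raised */
            perror("mmap refused");
            return 1;
        }
    }
    printf("range has no reserved pages\n");
    return 0;
}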
