linux-user/mmap.c @ ae017a5b
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

#if defined(CONFIG_USE_NPTL)
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
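
/* The lock is recursive per thread: mmap_lock_count lives in TLS, so a
   thread may nest mmap_lock()/mmap_unlock() pairs and only the outermost
   pair touches the mutex. */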
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}
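
/* After fork() only the forking thread survives in the child, so the mutex
   taken in mmap_fork_start() is reinitialized there rather than unlocked;
   the parent simply unlocks it. */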
void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
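    /* When TARGET_PAGE_SIZE is smaller than the host page size, the host
       pages at either end of the range may also contain neighbouring target
       pages, so their host protection must be the union of the flags of
       every target page that shares them. */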
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
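/* A host page cannot be partially file-backed, so when a mapping covers
   only part of one, the file contents are copied in by hand with pread()
   and the page protection is widened to the union of old and new flags. */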
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection so the file data can be written into the page */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#elif defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
# define TASK_UNMAPPED_BASE  0x18000000
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
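/* Where the next default-placement search begins; advanced on success so
   that consecutive anonymous mappings do not rescan from the base. */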
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

#ifdef CONFIG_USE_GUEST_BASE
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > RESERVED_VA) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > RESERVED_VA) {
        end_addr = RESERVED_VA;
    }
    addr = end_addr - qemu_host_page_size;
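
    /* Walk downward from the top of the reserved region one host page at a
       time.  A page with any flags set is in use and caps the candidate
       window from above; if the scan runs off the bottom, it wraps to the
       top of the reservation once before giving up. */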
    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = RESERVED_VA;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (addr + size == end_addr) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}
#endif

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

#ifdef CONFIG_USE_GUEST_BASE
    if (RESERVED_VA) {
        return mmap_find_vma_reserved(start, size);
    }
#endif

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
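
    /* Repeatedly ask the kernel for a free range, then check that the
       result is usable as a guest address and target-page aligned; the
       'repeat' counter steers the retry heuristics when it is not. */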
298

    
299
    for (;; prev = ptr) {
300
        /*
301
         * Reserve needed memory area to avoid a race.
302
         * It should be discarded using:
303
         *  - mmap() with MAP_FIXED flag
304
         *  - mremap() with MREMAP_FIXED flag
305
         *  - shmat() with SHM_REMAP flag
306
         */
307
        ptr = mmap(g2h(addr), size, PROT_NONE,
308
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
309

    
310
        /* ENOMEM, if host address space has no memory */
311
        if (ptr == MAP_FAILED) {
312
            return (abi_ulong)-1;
313
        }
314

    
315
        /* Count the number of sequential returns of the same address.
316
           This is used to modify the search algorithm below.  */
317
        repeat = (ptr == prev ? repeat + 1 : 0);
318

    
319
        if (h2g_valid(ptr + size - 1)) {
320
            addr = h2g(ptr);
321

    
322
            if ((addr & ~TARGET_PAGE_MASK) == 0) {
323
                /* Success.  */
324
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
325
                    mmap_next_start = addr + size;
326
                }
327
                return addr;
328
            }
329

    
330
            /* The address is not properly aligned for the target.  */
331
            switch (repeat) {
332
            case 0:
333
                /* Assume the result that the kernel gave us is the
334
                   first with enough free space, so start again at the
335
                   next higher target page.  */
336
                addr = TARGET_PAGE_ALIGN(addr);
337
                break;
338
            case 1:
339
                /* Sometimes the kernel decides to perform the allocation
340
                   at the top end of memory instead.  */
341
                addr &= TARGET_PAGE_MASK;
342
                break;
343
            case 2:
344
                /* Start over at low memory.  */
345
                addr = 0;
346
                break;
347
            default:
348
                /* Fail.  This unaligned block must the last.  */
349
                addr = -1;
350
                break;
351
            }
352
        } else {
353
            /* Since the result the kernel gave didn't fit, start
354
               again at low memory.  If any repetition, fail.  */
355
            addr = (repeat ? -1 : 0);
356
        }
357

    
358
        /* Unmap and try again.  */
359
        munmap(ptr, size);
360

    
361
        /* ENOMEM if we checked the whole of the target address space.  */
362
        if (addr == (abi_ulong)-1) {
363
            return (abi_ulong)-1;
364
        } else if (addr == 0) {
365
            if (wrapped) {
366
                return (abi_ulong)-1;
367
            }
368
            wrapped = 1;
369
            /* Don't actually use 0 when wrapping, instead indicate
370
               that we'd truly like an allocation in low memory.  */
371
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
372
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
373
                     : TARGET_PAGE_SIZE);
374
        } else if (wrapped && addr >= start) {
375
            return (abi_ulong)-1;
376
        }
377
    }
378
}
379

    
380
/* NOTE: all the constants are the HOST ones */
381
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
382
                     int flags, int fd, abi_ulong offset)
383
{
384
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
385

    
386
    mmap_lock();
387
#ifdef DEBUG_MMAP
388
    {
389
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
390
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
391
               start, len,
392
               prot & PROT_READ ? 'r' : '-',
393
               prot & PROT_WRITE ? 'w' : '-',
394
               prot & PROT_EXEC ? 'x' : '-');
395
        if (flags & MAP_FIXED)
396
            printf("MAP_FIXED ");
397
        if (flags & MAP_ANONYMOUS)
398
            printf("MAP_ANON ");
399
        switch(flags & MAP_TYPE) {
400
        case MAP_PRIVATE:
401
            printf("MAP_PRIVATE ");
402
            break;
403
        case MAP_SHARED:
404
            printf("MAP_SHARED ");
405
            break;
406
        default:
407
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
408
            break;
409
        }
410
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
411
    }
412
#endif
413

    
414
    if (offset & ~TARGET_PAGE_MASK) {
415
        errno = EINVAL;
416
        goto fail;
417
    }
418

    
419
    len = TARGET_PAGE_ALIGN(len);
420
    if (len == 0)
421
        goto the_end;
422
    real_start = start & qemu_host_page_mask;
423
    host_offset = offset & qemu_host_page_mask;
424

    
425
    /* If the user is asking for the kernel to find a location, do that
426
       before we truncate the length for mapping files below.  */
427
    if (!(flags & MAP_FIXED)) {
428
        host_len = len + offset - host_offset;
429
        host_len = HOST_PAGE_ALIGN(host_len);
430
        start = mmap_find_vma(real_start, host_len);
431
        if (start == (abi_ulong)-1) {
432
            errno = ENOMEM;
433
            goto fail;
434
        }
435
    }
436

    
437
    /* When mapping files into a memory area larger than the file, accesses
438
       to pages beyond the file size will cause a SIGBUS. 
439

440
       For example, if mmaping a file of 100 bytes on a host with 4K pages
441
       emulating a target with 8K pages, the target expects to be able to
442
       access the first 8K. But the host will trap us on any access beyond
443
       4K.  
444

445
       When emulating a target with a larger page-size than the hosts, we
446
       may need to truncate file maps at EOF and add extra anonymous pages
447
       up to the targets page boundary.  */
448

    
449
    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
450
        && !(flags & MAP_ANONYMOUS)) {
451
       struct stat sb;
452

    
453
       if (fstat (fd, &sb) == -1)
454
           goto fail;
455

    
456
       /* Are we trying to create a map beyond EOF?.  */
457
       if (offset + len > sb.st_size) {
458
           /* If so, truncate the file map at eof aligned with 
459
              the hosts real pagesize. Additional anonymous maps
460
              will be created beyond EOF.  */
461
           len = (sb.st_size - offset);
462
           len += qemu_real_host_page_size - 1;
463
           len &= ~(qemu_real_host_page_size - 1);
464
       }
465
    }
466

    
467
    if (!(flags & MAP_FIXED)) {
468
        unsigned long host_start;
469
        void *p;
470

    
471
        host_len = len + offset - host_offset;
472
        host_len = HOST_PAGE_ALIGN(host_len);
473

    
474
        /* Note: we prefer to control the mapping address. It is
475
           especially important if qemu_host_page_size >
476
           qemu_real_host_page_size */
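        /* Two-step mapping: first claim the whole host range with an
           anonymous MAP_FIXED mapping so it is ours, then, for file
           mappings, map the file over it at the host-page-aligned offset. */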
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination the host mmap() handles
         * this error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
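        /* Here the sub-host-page offset of the file offset differs from
           that of the start address, so no host mmap() can line them up:
           map anonymous writable memory and pread() the file contents into
           it instead. */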
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len, 0);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
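
/* With a pre-reserved guest address space (RESERVED_VA), a released range
   must stay reserved: remap it PROT_NONE instead of punching a hole with
   munmap(), leaving host pages that still back live target pages at either
   end untouched. */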
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
                 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
                 -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (RESERVED_VA) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len, 0);
    }
    mmap_unlock();
    return ret;
}

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     g2h(new_addr));

        if (RESERVED_VA && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
            if (RESERVED_VA) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
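        /* Resize in place (no MREMAP_MAYMOVE): with a reserved VA the block
           may only grow if every page of the extension is still unused by
           the guest, which the page_get_flags() scan below checks. */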
        if (RESERVED_VA && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
                mmap_reserve(old_addr + new_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size, 0);
    mmap_unlock();
    return new_addr;
}

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;
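
    /* msync() requires a host-page-aligned address, so round start down;
       flushing slightly more than requested is harmless. */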
    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}