root / translate-all.c @ 907a5e32
1
/*
2
 *  Host code generation
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#ifdef _WIN32
20
#include <windows.h>
21
#else
22
#include <sys/types.h>
23
#include <sys/mman.h>
24
#endif
25
#include <stdarg.h>
26
#include <stdlib.h>
27
#include <stdio.h>
28
#include <string.h>
29
#include <inttypes.h>
30

    
31
#include "config.h"
32

    
33
#include "qemu-common.h"
34
#define NO_CPU_IO_DEFS
35
#include "cpu.h"
36
#include "disas/disas.h"
37
#include "tcg.h"
38
#include "qemu/timer.h"
39
#include "exec/memory.h"
40
#include "exec/address-spaces.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include "qemu.h"
43
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
44
#include <sys/param.h>
45
#if __FreeBSD_version >= 700104
46
#define HAVE_KINFO_GETVMMAP
47
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
48
#include <sys/time.h>
49
#include <sys/proc.h>
50
#include <machine/profile.h>
51
#define _KERNEL
52
#include <sys/user.h>
53
#undef _KERNEL
54
#undef sigqueue
55
#include <libutil.h>
56
#endif
57
#endif
58
#endif
59

    
60
#include "exec/cputlb.h"
61
#include "translate-all.h"
62

    
63
//#define DEBUG_TB_INVALIDATE
64
//#define DEBUG_FLUSH
65
/* make various TB consistency checks */
66
//#define DEBUG_TB_CHECK
67

    
68
#if !defined(CONFIG_USER_ONLY)
69
/* TB consistency checks only implemented for usermode emulation.  */
70
#undef DEBUG_TB_CHECK
71
#endif
72

    
73
#define SMC_BITMAP_USE_THRESHOLD 10
74

    
75
typedef struct PageDesc {
76
    /* list of TBs intersecting this ram page */
77
    TranslationBlock *first_tb;
78
    /* in order to optimize self modifying code, we count the number
79
       of lookups we do to a given page to use a bitmap */
80
    unsigned int code_write_count;
81
    uint8_t *code_bitmap;
82
#if defined(CONFIG_USER_ONLY)
83
    unsigned long flags;
84
#endif
85
} PageDesc;
86

    
87
/* In system mode we want L1_MAP to be based on ram offsets,
88
   while in user mode we want it to be based on virtual addresses.  */
89
#if !defined(CONFIG_USER_ONLY)
90
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
91
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
92
#else
93
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
94
#endif
95
#else
96
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
97
#endif
98

    
99
/* The bits remaining after N lower levels of page tables.  */
100
#define V_L1_BITS_REM \
101
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
102

    
103
#if V_L1_BITS_REM < 4
104
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
105
#else
106
#define V_L1_BITS  V_L1_BITS_REM
107
#endif
108

    
109
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
110

    
111
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
112

    
113
uintptr_t qemu_real_host_page_size;
114
uintptr_t qemu_host_page_size;
115
uintptr_t qemu_host_page_mask;
116

    
117
/* This is a multi-level map on the virtual address space.
118
   The bottom level has pointers to PageDesc.  */
119
static void *l1_map[V_L1_SIZE];
120

    
121
/* code generation context */
122
TCGContext tcg_ctx;
123

    
124
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
125
                         tb_page_addr_t phys_page2);
126
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
127

    
128
void cpu_gen_init(void)
129
{
130
    tcg_context_init(&tcg_ctx); 
131
}
132

    
133
/* return non zero if the very first instruction is invalid so that
134
   the virtual CPU can trigger an exception.
135

136
   '*gen_code_size_ptr' contains the size of the generated code (host
137
   code).
138
*/
139
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
140
{
141
    TCGContext *s = &tcg_ctx;
142
    uint8_t *gen_code_buf;
143
    int gen_code_size;
144
#ifdef CONFIG_PROFILER
145
    int64_t ti;
146
#endif
147

    
148
#ifdef CONFIG_PROFILER
149
    s->tb_count1++; /* includes aborted translations because of
150
                       exceptions */
151
    ti = profile_getclock();
152
#endif
153
    tcg_func_start(s);
154

    
155
    gen_intermediate_code(env, tb);
156

    
157
    /* generate machine code */
158
    gen_code_buf = tb->tc_ptr;
159
    tb->tb_next_offset[0] = 0xffff;
160
    tb->tb_next_offset[1] = 0xffff;
161
    s->tb_next_offset = tb->tb_next_offset;
162
#ifdef USE_DIRECT_JUMP
163
    s->tb_jmp_offset = tb->tb_jmp_offset;
164
    s->tb_next = NULL;
165
#else
166
    s->tb_jmp_offset = NULL;
167
    s->tb_next = tb->tb_next;
168
#endif
169

    
170
#ifdef CONFIG_PROFILER
171
    s->tb_count++;
172
    s->interm_time += profile_getclock() - ti;
173
    s->code_time -= profile_getclock();
174
#endif
175
    gen_code_size = tcg_gen_code(s, gen_code_buf);
176
    *gen_code_size_ptr = gen_code_size;
177
#ifdef CONFIG_PROFILER
178
    s->code_time += profile_getclock();
179
    s->code_in_len += tb->size;
180
    s->code_out_len += gen_code_size;
181
#endif
182

    
183
#ifdef DEBUG_DISAS
184
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
185
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
186
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
187
        qemu_log("\n");
188
        qemu_log_flush();
189
    }
190
#endif
191
    return 0;
192
}
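
/* Code generation is a two-phase affair: gen_intermediate_code() decodes the
   guest instructions of the TB into TCG ops, and tcg_gen_code() then lowers
   those ops into host code at tb->tc_ptr.  A rough sketch of how a caller
   drives it (tb_gen_code() below is the real user; ROUND_UP here only stands
   in for the explicit alignment arithmetic done there):

       int size;
       tb->tc_ptr = tcg_ctx.code_gen_ptr;
       cpu_gen_code(env, tb, &size);
       tcg_ctx.code_gen_ptr += ROUND_UP(size, CODE_GEN_ALIGN);
 */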
193

    
194
/* The cpu state corresponding to 'searched_pc' is restored.
195
 */
196
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
197
                                     uintptr_t searched_pc)
198
{
199
    TCGContext *s = &tcg_ctx;
200
    int j;
201
    uintptr_t tc_ptr;
202
#ifdef CONFIG_PROFILER
203
    int64_t ti;
204
#endif
205

    
206
#ifdef CONFIG_PROFILER
207
    ti = profile_getclock();
208
#endif
209
    tcg_func_start(s);
210

    
211
    gen_intermediate_code_pc(env, tb);
212

    
213
    if (use_icount) {
214
        /* Reset the cycle counter to the start of the block.  */
215
        env->icount_decr.u16.low += tb->icount;
216
        /* Clear the IO flag.  */
217
        env->can_do_io = 0;
218
    }
219

    
220
    /* find opc index corresponding to search_pc */
221
    tc_ptr = (uintptr_t)tb->tc_ptr;
222
    if (searched_pc < tc_ptr)
223
        return -1;
224

    
225
    s->tb_next_offset = tb->tb_next_offset;
226
#ifdef USE_DIRECT_JUMP
227
    s->tb_jmp_offset = tb->tb_jmp_offset;
228
    s->tb_next = NULL;
229
#else
230
    s->tb_jmp_offset = NULL;
231
    s->tb_next = tb->tb_next;
232
#endif
233
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
234
    if (j < 0)
235
        return -1;
236
    /* now find start of instruction before */
237
    while (s->gen_opc_instr_start[j] == 0) {
238
        j--;
239
    }
240
    env->icount_decr.u16.low -= s->gen_opc_icount[j];
241

    
242
    restore_state_to_opc(env, tb, j);
243

    
244
#ifdef CONFIG_PROFILER
245
    s->restore_time += profile_getclock() - ti;
246
    s->restore_count++;
247
#endif
248
    return 0;
249
}
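
/* The restoration trick: the TB is simply translated again, with
   gen_intermediate_code_pc() recording for every TCG op which guest
   instruction produced it (s->gen_opc_instr_start / s->gen_opc_icount), and
   tcg_gen_code_search_pc() stopping host code emission once the offset
   'searched_pc - tc_ptr' is reached and returning the index of the op in
   flight at that point.  Walking back to the previous gen_opc_instr_start
   entry yields the guest instruction whose state restore_state_to_opc()
   then reinstates. */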
250

    
251
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
252
{
253
    TranslationBlock *tb;
254

    
255
    tb = tb_find_pc(retaddr);
256
    if (tb) {
257
        cpu_restore_state_from_tb(tb, env, retaddr);
258
        return true;
259
    }
260
    return false;
261
}
262

    
263
#ifdef _WIN32
264
static inline void map_exec(void *addr, long size)
265
{
266
    DWORD old_protect;
267
    VirtualProtect(addr, size,
268
                   PAGE_EXECUTE_READWRITE, &old_protect);
269
}
270
#else
271
static inline void map_exec(void *addr, long size)
272
{
273
    unsigned long start, end, page_size;
274

    
275
    page_size = getpagesize();
276
    start = (unsigned long)addr;
277
    start &= ~(page_size - 1);
278

    
279
    end = (unsigned long)addr + size;
280
    end += page_size - 1;
281
    end &= ~(page_size - 1);
282

    
283
    mprotect((void *)start, end - start,
284
             PROT_READ | PROT_WRITE | PROT_EXEC);
285
}
286
#endif
287

    
288
static void page_init(void)
289
{
290
    /* NOTE: we can always suppose that qemu_host_page_size >=
291
       TARGET_PAGE_SIZE */
292
#ifdef _WIN32
293
    {
294
        SYSTEM_INFO system_info;
295

    
296
        GetSystemInfo(&system_info);
297
        qemu_real_host_page_size = system_info.dwPageSize;
298
    }
299
#else
300
    qemu_real_host_page_size = getpagesize();
301
#endif
302
    if (qemu_host_page_size == 0) {
303
        qemu_host_page_size = qemu_real_host_page_size;
304
    }
305
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
306
        qemu_host_page_size = TARGET_PAGE_SIZE;
307
    }
308
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
309

    
310
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
311
    {
312
#ifdef HAVE_KINFO_GETVMMAP
313
        struct kinfo_vmentry *freep;
314
        int i, cnt;
315

    
316
        freep = kinfo_getvmmap(getpid(), &cnt);
317
        if (freep) {
318
            mmap_lock();
319
            for (i = 0; i < cnt; i++) {
320
                unsigned long startaddr, endaddr;
321

    
322
                startaddr = freep[i].kve_start;
323
                endaddr = freep[i].kve_end;
324
                if (h2g_valid(startaddr)) {
325
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
326

    
327
                    if (h2g_valid(endaddr)) {
328
                        endaddr = h2g(endaddr);
329
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
330
                    } else {
331
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
332
                        endaddr = ~0ul;
333
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
334
#endif
335
                    }
336
                }
337
            }
338
            free(freep);
339
            mmap_unlock();
340
        }
341
#else
342
        FILE *f;
343

    
344
        last_brk = (unsigned long)sbrk(0);
345

    
346
        f = fopen("/compat/linux/proc/self/maps", "r");
347
        if (f) {
348
            mmap_lock();
349

    
350
            do {
351
                unsigned long startaddr, endaddr;
352
                int n;
353

    
354
                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
355

    
356
                if (n == 2 && h2g_valid(startaddr)) {
357
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
358

    
359
                    if (h2g_valid(endaddr)) {
360
                        endaddr = h2g(endaddr);
361
                    } else {
362
                        endaddr = ~0ul;
363
                    }
364
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
365
                }
366
            } while (!feof(f));
367

    
368
            fclose(f);
369
            mmap_unlock();
370
        }
371
#endif
372
    }
373
#endif
374
}
375

    
376
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
377
{
378
    PageDesc *pd;
379
    void **lp;
380
    int i;
381

    
382
#if defined(CONFIG_USER_ONLY)
383
    /* We can't use g_malloc because it may recurse into a locked mutex. */
384
# define ALLOC(P, SIZE)                                 \
385
    do {                                                \
386
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
387
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
388
    } while (0)
389
#else
390
# define ALLOC(P, SIZE) \
391
    do { P = g_malloc0(SIZE); } while (0)
392
#endif
393

    
394
    /* Level 1.  Always allocated.  */
395
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
396

    
397
    /* Level 2..N-1.  */
398
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
399
        void **p = *lp;
400

    
401
        if (p == NULL) {
402
            if (!alloc) {
403
                return NULL;
404
            }
405
            ALLOC(p, sizeof(void *) * L2_SIZE);
406
            *lp = p;
407
        }
408

    
409
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
410
    }
411

    
412
    pd = *lp;
413
    if (pd == NULL) {
414
        if (!alloc) {
415
            return NULL;
416
        }
417
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
418
        *lp = pd;
419
    }
420

    
421
#undef ALLOC
422

    
423
    return pd + (index & (L2_SIZE - 1));
424
}
425

    
426
static inline PageDesc *page_find(tb_page_addr_t index)
427
{
428
    return page_find_alloc(index, 0);
429
}
430

    
431
#if !defined(CONFIG_USER_ONLY)
432
#define mmap_lock() do { } while (0)
433
#define mmap_unlock() do { } while (0)
434
#endif
435

    
436
#if defined(CONFIG_USER_ONLY)
437
/* Currently it is not recommended to allocate big chunks of data in
438
   user mode. It will change when a dedicated libc will be used.  */
439
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
440
   region in which the guest needs to run.  Revisit this.  */
441
#define USE_STATIC_CODE_GEN_BUFFER
442
#endif
443

    
444
/* ??? Should configure for this, not list operating systems here.  */
445
#if (defined(__linux__) \
446
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
447
    || defined(__DragonFly__) || defined(__OpenBSD__) \
448
    || defined(__NetBSD__))
449
# define USE_MMAP
450
#endif
451

    
452
/* Minimum size of the code gen buffer.  This number is randomly chosen,
453
   but not so small that we can't have a fair number of TB's live.  */
454
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
455

    
456
/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
457
   indicated, this is constrained by the range of direct branches on the
458
   host cpu, as used by the TCG implementation of goto_tb.  */
459
#if defined(__x86_64__)
460
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
461
#elif defined(__sparc__)
462
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
463
#elif defined(__arm__)
464
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
465
#elif defined(__s390x__)
466
  /* We have a +- 4GB range on the branches; leave some slop.  */
467
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
468
#else
469
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
470
#endif
471

    
472
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
473

    
474
#define DEFAULT_CODE_GEN_BUFFER_SIZE \
475
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
476
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
477

    
478
static inline size_t size_code_gen_buffer(size_t tb_size)
479
{
480
    /* Size the buffer.  */
481
    if (tb_size == 0) {
482
#ifdef USE_STATIC_CODE_GEN_BUFFER
483
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
484
#else
485
        /* ??? Needs adjustments.  */
486
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
487
           static buffer, we could size this on RESERVED_VA, on the text
488
           segment size of the executable, or continue to use the default.  */
489
        tb_size = (unsigned long)(ram_size / 4);
490
#endif
491
    }
492
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
493
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
494
    }
495
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
496
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
497
    }
498
    tcg_ctx.code_gen_buffer_size = tb_size;
499
    return tb_size;
500
}
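
/* The requested size is only a hint; it is clamped to the limits above.
   For example, asking for 256 KB is raised to the 1 MB
   MIN_CODE_GEN_BUFFER_SIZE, while asking for 4 GB on a 32-bit ARM host is
   cut down to the 16 MB MAX_CODE_GEN_BUFFER_SIZE imposed by the reach of
   direct branches there. */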
501

    
502
#ifdef USE_STATIC_CODE_GEN_BUFFER
503
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
504
    __attribute__((aligned(CODE_GEN_ALIGN)));
505

    
506
static inline void *alloc_code_gen_buffer(void)
507
{
508
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
509
    return static_code_gen_buffer;
510
}
511
#elif defined(USE_MMAP)
512
static inline void *alloc_code_gen_buffer(void)
513
{
514
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
515
    uintptr_t start = 0;
516
    void *buf;
517

    
518
    /* Constrain the position of the buffer based on the host cpu.
519
       Note that these addresses are chosen in concert with the
520
       addresses assigned in the relevant linker script file.  */
521
# if defined(__PIE__) || defined(__PIC__)
522
    /* Don't bother setting a preferred location if we're building
523
       a position-independent executable.  We're more likely to get
524
       an address near the main executable if we let the kernel
525
       choose the address.  */
526
# elif defined(__x86_64__) && defined(MAP_32BIT)
527
    /* Force the memory down into low memory with the executable.
528
       Leave the choice of exact location with the kernel.  */
529
    flags |= MAP_32BIT;
530
    /* Cannot expect to map more than 800MB in low memory.  */
531
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
532
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
533
    }
534
# elif defined(__sparc__)
535
    start = 0x40000000ul;
536
# elif defined(__s390x__)
537
    start = 0x90000000ul;
538
# endif
539

    
540
    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
541
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
542
    return buf == MAP_FAILED ? NULL : buf;
543
}
544
#else
545
static inline void *alloc_code_gen_buffer(void)
546
{
547
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);
548

    
549
    if (buf) {
550
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
551
    }
552
    return buf;
553
}
554
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
555

    
556
static inline void code_gen_alloc(size_t tb_size)
557
{
558
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
559
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
560
    if (tcg_ctx.code_gen_buffer == NULL) {
561
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
562
        exit(1);
563
    }
564

    
565
    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
566
            QEMU_MADV_HUGEPAGE);
567

    
568
    /* Steal room for the prologue at the end of the buffer.  This ensures
569
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
570
       from TB's to the prologue are going to be in range.  It also means
571
       that we don't need to mark (additional) portions of the data segment
572
       as executable.  */
573
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
574
            tcg_ctx.code_gen_buffer_size - 1024;
575
    tcg_ctx.code_gen_buffer_size -= 1024;
576

    
577
    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
578
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
579
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
580
            CODE_GEN_AVG_BLOCK_SIZE;
581
    tcg_ctx.tb_ctx.tbs =
582
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
583
}
584

    
585
/* Must be called before using the QEMU cpus. 'tb_size' is the size
586
   (in bytes) allocated to the translation buffer. Zero means default
587
   size. */
588
void tcg_exec_init(unsigned long tb_size)
589
{
590
    cpu_gen_init();
591
    code_gen_alloc(tb_size);
592
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
593
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
594
    page_init();
595
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
596
    /* There's no guest base to take into account, so go ahead and
597
       initialize the prologue now.  */
598
    tcg_prologue_init(&tcg_ctx);
599
#endif
600
}
601

    
602
bool tcg_enabled(void)
603
{
604
    return tcg_ctx.code_gen_buffer != NULL;
605
}
606

    
607
/* Allocate a new translation block. Flush the translation buffer if
608
   too many translation blocks or too much generated code. */
609
static TranslationBlock *tb_alloc(target_ulong pc)
610
{
611
    TranslationBlock *tb;
612

    
613
    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
614
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
615
         tcg_ctx.code_gen_buffer_max_size) {
616
        return NULL;
617
    }
618
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
619
    tb->pc = pc;
620
    tb->cflags = 0;
621
    return tb;
622
}
623

    
624
void tb_free(TranslationBlock *tb)
625
{
626
    /* In practice this is mostly used for single-use temporary TBs.
627
       Ignore the hard cases and just back up if this TB happens to
628
       be the last one generated.  */
629
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
630
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
631
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
632
        tcg_ctx.tb_ctx.nb_tbs--;
633
    }
634
}
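
/* TB descriptors and their host code are both bump-allocated
   (tbs[nb_tbs++] and code_gen_ptr only ever advance); there is no
   general-purpose free.  tb_free() can therefore only roll back the most
   recently generated TB, which covers the single-use temporary TBs
   mentioned above; everything else is reclaimed only by a full
   tb_flush(). */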
635

    
636
static inline void invalidate_page_bitmap(PageDesc *p)
637
{
638
    if (p->code_bitmap) {
639
        g_free(p->code_bitmap);
640
        p->code_bitmap = NULL;
641
    }
642
    p->code_write_count = 0;
643
}
644

    
645
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
646
static void page_flush_tb_1(int level, void **lp)
647
{
648
    int i;
649

    
650
    if (*lp == NULL) {
651
        return;
652
    }
653
    if (level == 0) {
654
        PageDesc *pd = *lp;
655

    
656
        for (i = 0; i < L2_SIZE; ++i) {
657
            pd[i].first_tb = NULL;
658
            invalidate_page_bitmap(pd + i);
659
        }
660
    } else {
661
        void **pp = *lp;
662

    
663
        for (i = 0; i < L2_SIZE; ++i) {
664
            page_flush_tb_1(level - 1, pp + i);
665
        }
666
    }
667
}
668

    
669
static void page_flush_tb(void)
670
{
671
    int i;
672

    
673
    for (i = 0; i < V_L1_SIZE; i++) {
674
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
675
    }
676
}
677

    
678
/* flush all the translation blocks */
679
/* XXX: tb_flush is currently not thread safe */
680
void tb_flush(CPUArchState *env1)
681
{
682
    CPUArchState *env;
683

    
684
#if defined(DEBUG_FLUSH)
685
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
686
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
687
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
688
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
689
           tcg_ctx.tb_ctx.nb_tbs : 0);
690
#endif
691
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
692
        > tcg_ctx.code_gen_buffer_size) {
693
        cpu_abort(env1, "Internal error: code buffer overflow\n");
694
    }
695
    tcg_ctx.tb_ctx.nb_tbs = 0;
696

    
697
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
698
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
699
    }
700

    
701
    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
702
            CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
703
    page_flush_tb();
704

    
705
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
706
    /* XXX: flush processor icache at this point if cache flush is
707
       expensive */
708
    tcg_ctx.tb_ctx.tb_flush_count++;
709
}
710

    
711
#ifdef DEBUG_TB_CHECK
712

    
713
static void tb_invalidate_check(target_ulong address)
714
{
715
    TranslationBlock *tb;
716
    int i;
717

    
718
    address &= TARGET_PAGE_MASK;
719
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
720
        for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
721
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
722
                  address >= tb->pc + tb->size)) {
723
                printf("ERROR invalidate: address=" TARGET_FMT_lx
724
                       " PC=%08lx size=%04x\n",
725
                       address, (long)tb->pc, tb->size);
726
            }
727
        }
728
    }
729
}
730

    
731
/* verify that all the pages have correct rights for code */
732
static void tb_page_check(void)
733
{
734
    TranslationBlock *tb;
735
    int i, flags1, flags2;
736

    
737
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
738
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
739
                tb = tb->phys_hash_next) {
740
            flags1 = page_get_flags(tb->pc);
741
            flags2 = page_get_flags(tb->pc + tb->size - 1);
742
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
743
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
744
                       (long)tb->pc, tb->size, flags1, flags2);
745
            }
746
        }
747
    }
748
}
749

    
750
#endif
751

    
752
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
753
{
754
    TranslationBlock *tb1;
755

    
756
    for (;;) {
757
        tb1 = *ptb;
758
        if (tb1 == tb) {
759
            *ptb = tb1->phys_hash_next;
760
            break;
761
        }
762
        ptb = &tb1->phys_hash_next;
763
    }
764
}
765

    
766
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
767
{
768
    TranslationBlock *tb1;
769
    unsigned int n1;
770

    
771
    for (;;) {
772
        tb1 = *ptb;
773
        n1 = (uintptr_t)tb1 & 3;
774
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
775
        if (tb1 == tb) {
776
            *ptb = tb1->page_next[n1];
777
            break;
778
        }
779
        ptb = &tb1->page_next[n1];
780
    }
781
}
782

    
783
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
784
{
785
    TranslationBlock *tb1, **ptb;
786
    unsigned int n1;
787

    
788
    ptb = &tb->jmp_next[n];
789
    tb1 = *ptb;
790
    if (tb1) {
791
        /* find tb(n) in circular list */
792
        for (;;) {
793
            tb1 = *ptb;
794
            n1 = (uintptr_t)tb1 & 3;
795
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
796
            if (n1 == n && tb1 == tb) {
797
                break;
798
            }
799
            if (n1 == 2) {
800
                ptb = &tb1->jmp_first;
801
            } else {
802
                ptb = &tb1->jmp_next[n1];
803
            }
804
        }
805
        /* now we can suppress tb(n) from the list */
806
        *ptb = tb->jmp_next[n];
807

    
808
        tb->jmp_next[n] = NULL;
809
    }
810
}
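
/* The lists above pack a tag into the two low bits of each pointer, which
   is safe because TranslationBlock structures are more strictly aligned
   than that.  In first_tb / page_next the tag (0 or 1) names which of the
   TB's two page slots the link belongs to; in the jmp_first / jmp_next
   circular list a tag of 0 or 1 names the jump slot of the next TB, and
   the value (tb | 2) marks the list head, which is how the loops above and
   in tb_phys_invalidate() detect the end of the chain. */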
811

    
812
/* reset the jump entry 'n' of a TB so that it is not chained to
813
   another TB */
814
static inline void tb_reset_jump(TranslationBlock *tb, int n)
815
{
816
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
817
}
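
/* tb_next_offset[n] starts out as 0xffff in cpu_gen_code() and is only
   filled in when the TB really ends in a direct jump, so 0xffff doubles as
   a "no direct jump" marker (tested in tb_link_page() and
   dump_exec_info()).  Resetting a jump retargets it at the code just after
   the jump itself, so execution drops out of the TB back to the execution
   loop instead of chaining to a stale block. */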
818

    
819
/* invalidate one TB */
820
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
821
{
822
    CPUArchState *env;
823
    PageDesc *p;
824
    unsigned int h, n1;
825
    tb_page_addr_t phys_pc;
826
    TranslationBlock *tb1, *tb2;
827

    
828
    /* remove the TB from the hash list */
829
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
830
    h = tb_phys_hash_func(phys_pc);
831
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);
832

    
833
    /* remove the TB from the page list */
834
    if (tb->page_addr[0] != page_addr) {
835
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
836
        tb_page_remove(&p->first_tb, tb);
837
        invalidate_page_bitmap(p);
838
    }
839
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
840
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
841
        tb_page_remove(&p->first_tb, tb);
842
        invalidate_page_bitmap(p);
843
    }
844

    
845
    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
846

    
847
    /* remove the TB from the hash list */
848
    h = tb_jmp_cache_hash_func(tb->pc);
849
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
850
        if (env->tb_jmp_cache[h] == tb) {
851
            env->tb_jmp_cache[h] = NULL;
852
        }
853
    }
854

    
855
    /* suppress this TB from the two jump lists */
856
    tb_jmp_remove(tb, 0);
857
    tb_jmp_remove(tb, 1);
858

    
859
    /* suppress any remaining jumps to this TB */
860
    tb1 = tb->jmp_first;
861
    for (;;) {
862
        n1 = (uintptr_t)tb1 & 3;
863
        if (n1 == 2) {
864
            break;
865
        }
866
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
867
        tb2 = tb1->jmp_next[n1];
868
        tb_reset_jump(tb1, n1);
869
        tb1->jmp_next[n1] = NULL;
870
        tb1 = tb2;
871
    }
872
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
873

    
874
    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
875
}
876

    
877
static inline void set_bits(uint8_t *tab, int start, int len)
878
{
879
    int end, mask, end1;
880

    
881
    end = start + len;
882
    tab += start >> 3;
883
    mask = 0xff << (start & 7);
884
    if ((start & ~7) == (end & ~7)) {
885
        if (start < end) {
886
            mask &= ~(0xff << (end & 7));
887
            *tab |= mask;
888
        }
889
    } else {
890
        *tab++ |= mask;
891
        start = (start + 8) & ~7;
892
        end1 = end & ~7;
893
        while (start < end1) {
894
            *tab++ = 0xff;
895
            start += 8;
896
        }
897
        if (start < end) {
898
            mask = ~(0xff << (end & 7));
899
            *tab |= mask;
900
        }
901
    }
902
}
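
/* Example: set_bits(tab, 3, 7) marks bits 3..9.  end is 10, so the first
   byte gets mask 0xff << 3 == 0xf8 (bits 3..7), start is rounded up to 8,
   no full 0xff bytes are needed, and the trailing partial byte gets
   ~(0xff << (10 & 7)) == 0x03 (bits 8..9). */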
903

    
904
static void build_page_bitmap(PageDesc *p)
905
{
906
    int n, tb_start, tb_end;
907
    TranslationBlock *tb;
908

    
909
    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
910

    
911
    tb = p->first_tb;
912
    while (tb != NULL) {
913
        n = (uintptr_t)tb & 3;
914
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
915
        /* NOTE: this is subtle as a TB may span two physical pages */
916
        if (n == 0) {
917
            /* NOTE: tb_end may be after the end of the page, but
918
               it is not a problem */
919
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
920
            tb_end = tb_start + tb->size;
921
            if (tb_end > TARGET_PAGE_SIZE) {
922
                tb_end = TARGET_PAGE_SIZE;
923
            }
924
        } else {
925
            tb_start = 0;
926
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
927
        }
928
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
929
        tb = tb->page_next[n];
930
    }
931
}
932

    
933
TranslationBlock *tb_gen_code(CPUArchState *env,
934
                              target_ulong pc, target_ulong cs_base,
935
                              int flags, int cflags)
936
{
937
    TranslationBlock *tb;
938
    uint8_t *tc_ptr;
939
    tb_page_addr_t phys_pc, phys_page2;
940
    target_ulong virt_page2;
941
    int code_gen_size;
942

    
943
    phys_pc = get_page_addr_code(env, pc);
944
    tb = tb_alloc(pc);
945
    if (!tb) {
946
        /* flush must be done */
947
        tb_flush(env);
948
        /* cannot fail at this point */
949
        tb = tb_alloc(pc);
950
        /* Don't forget to invalidate previous TB info.  */
951
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
952
    }
953
    tc_ptr = tcg_ctx.code_gen_ptr;
954
    tb->tc_ptr = tc_ptr;
955
    tb->cs_base = cs_base;
956
    tb->flags = flags;
957
    tb->cflags = cflags;
958
    cpu_gen_code(env, tb, &code_gen_size);
959
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
960
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
961

    
962
    /* check next page if needed */
963
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
964
    phys_page2 = -1;
965
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
966
        phys_page2 = get_page_addr_code(env, virt_page2);
967
    }
968
    tb_link_page(tb, phys_pc, phys_page2);
969
    return tb;
970
}
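
/* A TB may straddle a target page boundary when its guest code starts near
   the end of one page and continues on the next; phys_page2 records the
   physical address of that second page so that tb_link_page() can enter
   the TB on both pages' lists and protect both of them against writes. */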
971

    
972
/*
973
 * Invalidate all TBs which intersect with the target physical address range
974
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
975
 * 'is_cpu_write_access' should be true if called from a real cpu write
976
 * access: the virtual CPU will exit the current TB if code is modified inside
977
 * this TB.
978
 */
979
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
980
                              int is_cpu_write_access)
981
{
982
    while (start < end) {
983
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
984
        start &= TARGET_PAGE_MASK;
985
        start += TARGET_PAGE_SIZE;
986
    }
987
}
988

    
989
/*
990
 * Invalidate all TBs which intersect with the target physical address range
991
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
992
 * 'is_cpu_write_access' should be true if called from a real cpu write
993
 * access: the virtual CPU will exit the current TB if code is modified inside
994
 * this TB.
995
 */
996
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
997
                                   int is_cpu_write_access)
998
{
999
    TranslationBlock *tb, *tb_next, *saved_tb;
1000
    CPUArchState *env = cpu_single_env;
1001
    CPUState *cpu = NULL;
1002
    tb_page_addr_t tb_start, tb_end;
1003
    PageDesc *p;
1004
    int n;
1005
#ifdef TARGET_HAS_PRECISE_SMC
1006
    int current_tb_not_found = is_cpu_write_access;
1007
    TranslationBlock *current_tb = NULL;
1008
    int current_tb_modified = 0;
1009
    target_ulong current_pc = 0;
1010
    target_ulong current_cs_base = 0;
1011
    int current_flags = 0;
1012
#endif /* TARGET_HAS_PRECISE_SMC */
1013

    
1014
    p = page_find(start >> TARGET_PAGE_BITS);
1015
    if (!p) {
1016
        return;
1017
    }
1018
    if (!p->code_bitmap &&
1019
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1020
        is_cpu_write_access) {
1021
        /* build code bitmap */
1022
        build_page_bitmap(p);
1023
    }
1024
    if (env != NULL) {
1025
        cpu = ENV_GET_CPU(env);
1026
    }
1027

    
1028
    /* we remove all the TBs in the range [start, end[ */
1029
    /* XXX: see if in some cases it could be faster to invalidate all
1030
       the code */
1031
    tb = p->first_tb;
1032
    while (tb != NULL) {
1033
        n = (uintptr_t)tb & 3;
1034
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1035
        tb_next = tb->page_next[n];
1036
        /* NOTE: this is subtle as a TB may span two physical pages */
1037
        if (n == 0) {
1038
            /* NOTE: tb_end may be after the end of the page, but
1039
               it is not a problem */
1040
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1041
            tb_end = tb_start + tb->size;
1042
        } else {
1043
            tb_start = tb->page_addr[1];
1044
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1045
        }
1046
        if (!(tb_end <= start || tb_start >= end)) {
1047
#ifdef TARGET_HAS_PRECISE_SMC
1048
            if (current_tb_not_found) {
1049
                current_tb_not_found = 0;
1050
                current_tb = NULL;
1051
                if (env->mem_io_pc) {
1052
                    /* now we have a real cpu fault */
1053
                    current_tb = tb_find_pc(env->mem_io_pc);
1054
                }
1055
            }
1056
            if (current_tb == tb &&
1057
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
1058
                /* If we are modifying the current TB, we must stop
1059
                its execution. We could be more precise by checking
1060
                that the modification is after the current PC, but it
1061
                would require a specialized function to partially
1062
                restore the CPU state */
1063

    
1064
                current_tb_modified = 1;
1065
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
1066
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1067
                                     &current_flags);
1068
            }
1069
#endif /* TARGET_HAS_PRECISE_SMC */
1070
            /* we need to do that to handle the case where a signal
1071
               occurs while doing tb_phys_invalidate() */
1072
            saved_tb = NULL;
1073
            if (cpu != NULL) {
1074
                saved_tb = cpu->current_tb;
1075
                cpu->current_tb = NULL;
1076
            }
1077
            tb_phys_invalidate(tb, -1);
1078
            if (cpu != NULL) {
1079
                cpu->current_tb = saved_tb;
1080
                if (env && env->interrupt_request && cpu->current_tb) {
1081
                    cpu_interrupt(env, env->interrupt_request);
1082
                }
1083
            }
1084
        }
1085
        tb = tb_next;
1086
    }
1087
#if !defined(CONFIG_USER_ONLY)
1088
    /* if no code remaining, no need to continue to use slow writes */
1089
    if (!p->first_tb) {
1090
        invalidate_page_bitmap(p);
1091
        if (is_cpu_write_access) {
1092
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1093
        }
1094
    }
1095
#endif
1096
#ifdef TARGET_HAS_PRECISE_SMC
1097
    if (current_tb_modified) {
1098
        /* we generate a block containing just the instruction
1099
           modifying the memory. It will ensure that it cannot modify
1100
           itself */
1101
        cpu->current_tb = NULL;
1102
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1103
        cpu_resume_from_signal(env, NULL);
1104
    }
1105
#endif
1106
}
1107

    
1108
/* len must be <= 8 and start must be a multiple of len */
1109
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1110
{
1111
    PageDesc *p;
1112
    int offset, b;
1113

    
1114
#if 0
1115
    if (1) {
1116
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1117
                  cpu_single_env->mem_io_vaddr, len,
1118
                  cpu_single_env->eip,
1119
                  cpu_single_env->eip +
1120
                  (intptr_t)cpu_single_env->segs[R_CS].base);
1121
    }
1122
#endif
1123
    p = page_find(start >> TARGET_PAGE_BITS);
1124
    if (!p) {
1125
        return;
1126
    }
1127
    if (p->code_bitmap) {
1128
        offset = start & ~TARGET_PAGE_MASK;
1129
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1130
        if (b & ((1 << len) - 1)) {
1131
            goto do_invalidate;
1132
        }
1133
    } else {
1134
    do_invalidate:
1135
        tb_invalidate_phys_page_range(start, start + len, 1);
1136
    }
1137
}
1138

    
1139
#if !defined(CONFIG_SOFTMMU)
1140
static void tb_invalidate_phys_page(tb_page_addr_t addr,
1141
                                    uintptr_t pc, void *puc)
1142
{
1143
    TranslationBlock *tb;
1144
    PageDesc *p;
1145
    int n;
1146
#ifdef TARGET_HAS_PRECISE_SMC
1147
    TranslationBlock *current_tb = NULL;
1148
    CPUArchState *env = cpu_single_env;
1149
    CPUState *cpu = NULL;
1150
    int current_tb_modified = 0;
1151
    target_ulong current_pc = 0;
1152
    target_ulong current_cs_base = 0;
1153
    int current_flags = 0;
1154
#endif
1155

    
1156
    addr &= TARGET_PAGE_MASK;
1157
    p = page_find(addr >> TARGET_PAGE_BITS);
1158
    if (!p) {
1159
        return;
1160
    }
1161
    tb = p->first_tb;
1162
#ifdef TARGET_HAS_PRECISE_SMC
1163
    if (tb && pc != 0) {
1164
        current_tb = tb_find_pc(pc);
1165
    }
1166
    if (env != NULL) {
1167
        cpu = ENV_GET_CPU(env);
1168
    }
1169
#endif
1170
    while (tb != NULL) {
1171
        n = (uintptr_t)tb & 3;
1172
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1173
#ifdef TARGET_HAS_PRECISE_SMC
1174
        if (current_tb == tb &&
1175
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1176
                /* If we are modifying the current TB, we must stop
1177
                   its execution. We could be more precise by checking
1178
                   that the modification is after the current PC, but it
1179
                   would require a specialized function to partially
1180
                   restore the CPU state */
1181

    
1182
            current_tb_modified = 1;
1183
            cpu_restore_state_from_tb(current_tb, env, pc);
1184
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1185
                                 &current_flags);
1186
        }
1187
#endif /* TARGET_HAS_PRECISE_SMC */
1188
        tb_phys_invalidate(tb, addr);
1189
        tb = tb->page_next[n];
1190
    }
1191
    p->first_tb = NULL;
1192
#ifdef TARGET_HAS_PRECISE_SMC
1193
    if (current_tb_modified) {
1194
        /* we generate a block containing just the instruction
1195
           modifying the memory. It will ensure that it cannot modify
1196
           itself */
1197
        cpu->current_tb = NULL;
1198
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1199
        cpu_resume_from_signal(env, puc);
1200
    }
1201
#endif
1202
}
1203
#endif
1204

    
1205
/* add the tb in the target page and protect it if necessary */
1206
static inline void tb_alloc_page(TranslationBlock *tb,
1207
                                 unsigned int n, tb_page_addr_t page_addr)
1208
{
1209
    PageDesc *p;
1210
#ifndef CONFIG_USER_ONLY
1211
    bool page_already_protected;
1212
#endif
1213

    
1214
    tb->page_addr[n] = page_addr;
1215
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1216
    tb->page_next[n] = p->first_tb;
1217
#ifndef CONFIG_USER_ONLY
1218
    page_already_protected = p->first_tb != NULL;
1219
#endif
1220
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1221
    invalidate_page_bitmap(p);
1222

    
1223
#if defined(TARGET_HAS_SMC) || 1
1224

    
1225
#if defined(CONFIG_USER_ONLY)
1226
    if (p->flags & PAGE_WRITE) {
1227
        target_ulong addr;
1228
        PageDesc *p2;
1229
        int prot;
1230

    
1231
        /* force the host page as non writable (writes will have a
1232
           page fault + mprotect overhead) */
1233
        page_addr &= qemu_host_page_mask;
1234
        prot = 0;
1235
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1236
            addr += TARGET_PAGE_SIZE) {
1237

    
1238
            p2 = page_find(addr >> TARGET_PAGE_BITS);
1239
            if (!p2) {
1240
                continue;
1241
            }
1242
            prot |= p2->flags;
1243
            p2->flags &= ~PAGE_WRITE;
1244
          }
1245
        mprotect(g2h(page_addr), qemu_host_page_size,
1246
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1247
#ifdef DEBUG_TB_INVALIDATE
1248
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1249
               page_addr);
1250
#endif
1251
    }
1252
#else
1253
    /* if some code is already present, then the pages are already
1254
       protected. So we handle the case where only the first TB is
1255
       allocated in a physical page */
1256
    if (!page_already_protected) {
1257
        tlb_protect_code(page_addr);
1258
    }
1259
#endif
1260

    
1261
#endif /* TARGET_HAS_SMC */
1262
}
1263

    
1264
/* add a new TB and link it to the physical page tables. phys_page2 is
1265
   (-1) to indicate that only one page contains the TB. */
1266
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1267
                         tb_page_addr_t phys_page2)
1268
{
1269
    unsigned int h;
1270
    TranslationBlock **ptb;
1271

    
1272
    /* Grab the mmap lock to stop another thread invalidating this TB
1273
       before we are done.  */
1274
    mmap_lock();
1275
    /* add in the physical hash table */
1276
    h = tb_phys_hash_func(phys_pc);
1277
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
1278
    tb->phys_hash_next = *ptb;
1279
    *ptb = tb;
1280

    
1281
    /* add in the page list */
1282
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1283
    if (phys_page2 != -1) {
1284
        tb_alloc_page(tb, 1, phys_page2);
1285
    } else {
1286
        tb->page_addr[1] = -1;
1287
    }
1288

    
1289
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1290
    tb->jmp_next[0] = NULL;
1291
    tb->jmp_next[1] = NULL;
1292

    
1293
    /* init original jump addresses */
1294
    if (tb->tb_next_offset[0] != 0xffff) {
1295
        tb_reset_jump(tb, 0);
1296
    }
1297
    if (tb->tb_next_offset[1] != 0xffff) {
1298
        tb_reset_jump(tb, 1);
1299
    }
1300

    
1301
#ifdef DEBUG_TB_CHECK
1302
    tb_page_check();
1303
#endif
1304
    mmap_unlock();
1305
}
1306

    
1307
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
1308
/* check whether the given addr is in the TCG-generated code buffer or not */
1309
bool is_tcg_gen_code(uintptr_t tc_ptr)
1310
{
1311
    /* This can be called during code generation, code_gen_buffer_max_size
1312
       is used instead of code_gen_ptr for upper boundary checking */
1313
    return (tc_ptr >= (uintptr_t)tcg_ctx.code_gen_buffer &&
1314
            tc_ptr < (uintptr_t)(tcg_ctx.code_gen_buffer +
1315
                    tcg_ctx.code_gen_buffer_max_size));
1316
}
1317
#endif
1318

    
1319
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1320
   tb[1].tc_ptr. Return NULL if not found */
1321
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1322
{
1323
    int m_min, m_max, m;
1324
    uintptr_t v;
1325
    TranslationBlock *tb;
1326

    
1327
    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1328
        return NULL;
1329
    }
1330
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1331
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1332
        return NULL;
1333
    }
1334
    /* binary search (cf Knuth) */
1335
    m_min = 0;
1336
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1337
    while (m_min <= m_max) {
1338
        m = (m_min + m_max) >> 1;
1339
        tb = &tcg_ctx.tb_ctx.tbs[m];
1340
        v = (uintptr_t)tb->tc_ptr;
1341
        if (v == tc_ptr) {
1342
            return tb;
1343
        } else if (tc_ptr < v) {
1344
            m_max = m - 1;
1345
        } else {
1346
            m_min = m + 1;
1347
        }
1348
    }
1349
    return &tcg_ctx.tb_ctx.tbs[m_max];
1350
}
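
/* The binary search is valid because TBs are carved out of code_gen_buffer
   sequentially, so tbs[] is ordered by tc_ptr.  The loop ends up returning
   the last TB whose generated code starts at or before tc_ptr, i.e. the
   one whose host code contains the address being looked up. */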
1351

    
1352
static void tb_reset_jump_recursive(TranslationBlock *tb);
1353

    
1354
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1355
{
1356
    TranslationBlock *tb1, *tb_next, **ptb;
1357
    unsigned int n1;
1358

    
1359
    tb1 = tb->jmp_next[n];
1360
    if (tb1 != NULL) {
1361
        /* find head of list */
1362
        for (;;) {
1363
            n1 = (uintptr_t)tb1 & 3;
1364
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1365
            if (n1 == 2) {
1366
                break;
1367
            }
1368
            tb1 = tb1->jmp_next[n1];
1369
        }
1370
        /* we are now sure that tb jumps to tb1 */
1371
        tb_next = tb1;
1372

    
1373
        /* remove tb from the jmp_first list */
1374
        ptb = &tb_next->jmp_first;
1375
        for (;;) {
1376
            tb1 = *ptb;
1377
            n1 = (uintptr_t)tb1 & 3;
1378
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1379
            if (n1 == n && tb1 == tb) {
1380
                break;
1381
            }
1382
            ptb = &tb1->jmp_next[n1];
1383
        }
1384
        *ptb = tb->jmp_next[n];
1385
        tb->jmp_next[n] = NULL;
1386

    
1387
        /* suppress the jump to next tb in generated code */
1388
        tb_reset_jump(tb, n);
1389

    
1390
        /* suppress jumps in the tb on which we could have jumped */
1391
        tb_reset_jump_recursive(tb_next);
1392
    }
1393
}
1394

    
1395
static void tb_reset_jump_recursive(TranslationBlock *tb)
1396
{
1397
    tb_reset_jump_recursive2(tb, 0);
1398
    tb_reset_jump_recursive2(tb, 1);
1399
}
1400

    
1401
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
1402
void tb_invalidate_phys_addr(hwaddr addr)
1403
{
1404
    ram_addr_t ram_addr;
1405
    MemoryRegionSection *section;
1406

    
1407
    section = phys_page_find(address_space_memory.dispatch,
1408
                             addr >> TARGET_PAGE_BITS);
1409
    if (!(memory_region_is_ram(section->mr)
1410
          || (section->mr->rom_device && section->mr->readable))) {
1411
        return;
1412
    }
1413
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1414
        + memory_region_section_addr(section, addr);
1415
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1416
}
1417
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
1418

    
1419
void cpu_unlink_tb(CPUState *cpu)
1420
{
1421
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1422
       problem and hope the cpu will stop of its own accord.  For userspace
1423
       emulation this often isn't actually as bad as it sounds.  Often
1424
       signals are used primarily to interrupt blocking syscalls.  */
1425
    TranslationBlock *tb;
1426
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1427

    
1428
    spin_lock(&interrupt_lock);
1429
    tb = cpu->current_tb;
1430
    /* if the cpu is currently executing code, we must unlink it and
1431
       all the potentially executing TB */
1432
    if (tb) {
1433
        cpu->current_tb = NULL;
1434
        tb_reset_jump_recursive(tb);
1435
    }
1436
    spin_unlock(&interrupt_lock);
1437
}
1438

    
1439
void tb_check_watchpoint(CPUArchState *env)
1440
{
1441
    TranslationBlock *tb;
1442

    
1443
    tb = tb_find_pc(env->mem_io_pc);
1444
    if (!tb) {
1445
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
1446
                  (void *)env->mem_io_pc);
1447
    }
1448
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
1449
    tb_phys_invalidate(tb, -1);
1450
}
1451

    
1452
#ifndef CONFIG_USER_ONLY
1453
/* mask must never be zero, except for A20 change call */
1454
static void tcg_handle_interrupt(CPUArchState *env, int mask)
1455
{
1456
    CPUState *cpu = ENV_GET_CPU(env);
1457
    int old_mask;
1458

    
1459
    old_mask = env->interrupt_request;
1460
    env->interrupt_request |= mask;
1461

    
1462
    /*
1463
     * If called from iothread context, wake the target cpu in
1464
     * case it's halted.
1465
     */
1466
    if (!qemu_cpu_is_self(cpu)) {
1467
        qemu_cpu_kick(cpu);
1468
        return;
1469
    }
1470

    
1471
    if (use_icount) {
1472
        env->icount_decr.u16.high = 0xffff;
1473
        if (!can_do_io(env)
1474
            && (mask & ~old_mask) != 0) {
1475
            cpu_abort(env, "Raised interrupt while not in I/O function");
1476
        }
1477
    } else {
1478
        cpu_unlink_tb(cpu);
1479
    }
1480
}
1481

    
1482
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1483

    
1484
/* in deterministic execution mode, instructions doing device I/Os
1485
   must be at the end of the TB */
1486
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
1487
{
1488
    TranslationBlock *tb;
1489
    uint32_t n, cflags;
1490
    target_ulong pc, cs_base;
1491
    uint64_t flags;
1492

    
1493
    tb = tb_find_pc(retaddr);
1494
    if (!tb) {
1495
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
1496
                  (void *)retaddr);
1497
    }
1498
    n = env->icount_decr.u16.low + tb->icount;
1499
    cpu_restore_state_from_tb(tb, env, retaddr);
1500
    /* Calculate how many instructions had been executed before the fault
1501
       occurred.  */
1502
    n = n - env->icount_decr.u16.low;
1503
    /* Generate a new TB ending on the I/O insn.  */
1504
    n++;
1505
    /* On MIPS and SH, delay slot instructions can only be restarted if
1506
       they were already the first instruction in the TB.  If this is not
1507
       the first instruction in a TB then re-execute the preceding
1508
       branch.  */
1509
#if defined(TARGET_MIPS)
1510
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1511
        env->active_tc.PC -= 4;
1512
        env->icount_decr.u16.low++;
1513
        env->hflags &= ~MIPS_HFLAG_BMASK;
1514
    }
1515
#elif defined(TARGET_SH4)
1516
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1517
            && n > 1) {
1518
        env->pc -= 2;
1519
        env->icount_decr.u16.low++;
1520
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1521
    }
1522
#endif
1523
    /* This should never happen.  */
1524
    if (n > CF_COUNT_MASK) {
1525
        cpu_abort(env, "TB too big during recompile");
1526
    }
1527

    
1528
    cflags = n | CF_LAST_IO;
1529
    pc = tb->pc;
1530
    cs_base = tb->cs_base;
1531
    flags = tb->flags;
1532
    tb_phys_invalidate(tb, -1);
1533
    /* FIXME: In theory this could raise an exception.  In practice
1534
       we have already translated the block once so it's probably ok.  */
1535
    tb_gen_code(env, pc, cs_base, flags, cflags);
1536
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1537
       the first in the TB) then we end up generating a whole new TB and
1538
       repeating the fault, which is horribly inefficient.
1539
       Better would be to execute just this insn uncached, or generate a
1540
       second new TB.  */
1541
    cpu_resume_from_signal(env, NULL);
1542
}
1543

    
1544
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
1545
{
1546
    unsigned int i;
1547

    
1548
    /* Discard jump cache entries for any tb which might potentially
1549
       overlap the flushed page.  */
1550
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1551
    memset(&env->tb_jmp_cache[i], 0,
1552
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1553

    
1554
    i = tb_jmp_cache_hash_page(addr);
1555
    memset(&env->tb_jmp_cache[i], 0,
1556
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1557
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                             target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);

    env->interrupt_request |= mask;
    cpu_unlink_tb(cpu);
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
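
/* Illustrative sketch, not part of the original file: a second
 * walk_memory_regions() consumer in the style of dump_region() above,
 * here tallying how many bytes are mapped executable.  The names
 * count_exec_region and count_exec_bytes are hypothetical. */
static int count_exec_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    abi_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0;
}

static abi_ulong count_exec_bytes(void)
{
    abi_ulong total = 0;

    walk_memory_regions(&total, count_exec_region);
    return total;
}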

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
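
/* Illustrative sketch, not part of the original file: how an mmap-style
 * caller typically publishes a fresh guest mapping, combining the PAGE_*
 * access bits with PAGE_VALID so that page_check_range() will accept the
 * range.  register_guest_mapping is a hypothetical name. */
static void register_guest_mapping(target_ulong start, target_ulong len,
                                   int page_flags)
{
    /* page_set_flags() aligns the bounds itself; the mmap_lock is assumed
       to be held by the caller, as required above. */
    page_set_flags(start, start + len, page_flags | PAGE_VALID);
}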

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            return 0;
        }
    }
    return 0;
}
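
/* Illustrative sketch, not part of the original file: the usual caller
 * pattern, validating a guest buffer with page_check_range() before it
 * is read.  guest_range_readable is a hypothetical helper. */
static inline bool guest_range_readable(target_ulong addr, target_ulong len)
{
    return page_check_range(addr, len, PAGE_READ) == 0;
}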

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
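
/* Illustrative sketch, not part of the original file: the shape of the
 * user-mode SEGV path that consumes page_unprotect().  The real handler
 * lives elsewhere; handle_guest_write_fault is a hypothetical name and
 * only makes the calling convention concrete. */
static int handle_guest_write_fault(void *host_addr, uintptr_t pc, void *puc)
{
    if (h2g_valid(host_addr)) {
        /* Non-zero means the page was read-only only because it held
           translated code; the faulting write can then be retried. */
        return page_unprotect(h2g(host_addr), pc, puc);
    }
    return 0;
}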
#endif /* CONFIG_USER_ONLY */