Statistics
| Branch: | Revision:

root / translate-all.c @ feature-archipelago

History | View | Annotate | Download (53.4 kB)

1 d19893da bellard
/*
2 d19893da bellard
 *  Host code generation
3 5fafdf24 ths
 *
4 d19893da bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 d19893da bellard
 *
6 d19893da bellard
 * This library is free software; you can redistribute it and/or
7 d19893da bellard
 * modify it under the terms of the GNU Lesser General Public
8 d19893da bellard
 * License as published by the Free Software Foundation; either
9 d19893da bellard
 * version 2 of the License, or (at your option) any later version.
10 d19893da bellard
 *
11 d19893da bellard
 * This library is distributed in the hope that it will be useful,
12 d19893da bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 d19893da bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 d19893da bellard
 * Lesser General Public License for more details.
15 d19893da bellard
 *
16 d19893da bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 d19893da bellard
 */
19 5b6dd868 Blue Swirl
#ifdef _WIN32
20 5b6dd868 Blue Swirl
#include <windows.h>
21 5b6dd868 Blue Swirl
#else
22 5b6dd868 Blue Swirl
#include <sys/types.h>
23 5b6dd868 Blue Swirl
#include <sys/mman.h>
24 5b6dd868 Blue Swirl
#endif
25 d19893da bellard
#include <stdarg.h>
26 d19893da bellard
#include <stdlib.h>
27 d19893da bellard
#include <stdio.h>
28 d19893da bellard
#include <string.h>
29 d19893da bellard
#include <inttypes.h>
30 d19893da bellard
31 d19893da bellard
#include "config.h"
32 2054396a bellard
33 5b6dd868 Blue Swirl
#include "qemu-common.h"
34 af5ad107 bellard
#define NO_CPU_IO_DEFS
35 d3eead2e bellard
#include "cpu.h"
36 76cad711 Paolo Bonzini
#include "disas/disas.h"
37 57fec1fe bellard
#include "tcg.h"
38 5b6dd868 Blue Swirl
#if defined(CONFIG_USER_ONLY)
39 5b6dd868 Blue Swirl
#include "qemu.h"
40 5b6dd868 Blue Swirl
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 5b6dd868 Blue Swirl
#include <sys/param.h>
42 5b6dd868 Blue Swirl
#if __FreeBSD_version >= 700104
43 5b6dd868 Blue Swirl
#define HAVE_KINFO_GETVMMAP
44 5b6dd868 Blue Swirl
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
45 5b6dd868 Blue Swirl
#include <sys/time.h>
46 5b6dd868 Blue Swirl
#include <sys/proc.h>
47 5b6dd868 Blue Swirl
#include <machine/profile.h>
48 5b6dd868 Blue Swirl
#define _KERNEL
49 5b6dd868 Blue Swirl
#include <sys/user.h>
50 5b6dd868 Blue Swirl
#undef _KERNEL
51 5b6dd868 Blue Swirl
#undef sigqueue
52 5b6dd868 Blue Swirl
#include <libutil.h>
53 5b6dd868 Blue Swirl
#endif
54 5b6dd868 Blue Swirl
#endif
55 0bc3cd62 Paolo Bonzini
#else
56 0bc3cd62 Paolo Bonzini
#include "exec/address-spaces.h"
57 5b6dd868 Blue Swirl
#endif
58 5b6dd868 Blue Swirl
59 022c62cb Paolo Bonzini
#include "exec/cputlb.h"
60 5b6dd868 Blue Swirl
#include "translate-all.h"
61 0aa09897 Alexey Kardashevskiy
#include "qemu/timer.h"
62 5b6dd868 Blue Swirl
63 5b6dd868 Blue Swirl
//#define DEBUG_TB_INVALIDATE
64 5b6dd868 Blue Swirl
//#define DEBUG_FLUSH
65 5b6dd868 Blue Swirl
/* make various TB consistency checks */
66 5b6dd868 Blue Swirl
//#define DEBUG_TB_CHECK
67 5b6dd868 Blue Swirl
68 5b6dd868 Blue Swirl
#if !defined(CONFIG_USER_ONLY)
69 5b6dd868 Blue Swirl
/* TB consistency checks only implemented for usermode emulation.  */
70 5b6dd868 Blue Swirl
#undef DEBUG_TB_CHECK
71 5b6dd868 Blue Swirl
#endif
72 5b6dd868 Blue Swirl
73 5b6dd868 Blue Swirl
#define SMC_BITMAP_USE_THRESHOLD 10
74 5b6dd868 Blue Swirl
75 5b6dd868 Blue Swirl
/* Per-page bookkeeping for translated code.  One PageDesc exists for each
   guest page that is tracked by the translator; see page_find_alloc() for
   how these are stored in the multi-level l1_map.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* bitmap of code bytes on this page; presumably allocated once
       code_write_count passes SMC_BITMAP_USE_THRESHOLD — the consumer is
       below this chunk, confirm in invalidate_page_bitmap()/users.  */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* page protection flags (user-mode emulation only) */
    unsigned long flags;
#endif
} PageDesc;
86 5b6dd868 Blue Swirl
87 5b6dd868 Blue Swirl
/* In system mode we want L1_MAP to be based on ram offsets,
88 5b6dd868 Blue Swirl
   while in user mode we want it to be based on virtual addresses.  */
89 5b6dd868 Blue Swirl
#if !defined(CONFIG_USER_ONLY)
90 5b6dd868 Blue Swirl
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
91 5b6dd868 Blue Swirl
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
92 5b6dd868 Blue Swirl
#else
93 5b6dd868 Blue Swirl
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
94 5b6dd868 Blue Swirl
#endif
95 5b6dd868 Blue Swirl
#else
96 5b6dd868 Blue Swirl
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
97 5b6dd868 Blue Swirl
#endif
98 5b6dd868 Blue Swirl
99 03f49957 Paolo Bonzini
/* Size of the L2 (and L3, etc) page tables.  */
100 03f49957 Paolo Bonzini
#define V_L2_BITS 10
101 03f49957 Paolo Bonzini
#define V_L2_SIZE (1 << V_L2_BITS)
102 03f49957 Paolo Bonzini
103 5b6dd868 Blue Swirl
/* The bits remaining after N lower levels of page tables.  */
104 5b6dd868 Blue Swirl
#define V_L1_BITS_REM \
105 03f49957 Paolo Bonzini
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
106 5b6dd868 Blue Swirl
107 5b6dd868 Blue Swirl
#if V_L1_BITS_REM < 4
108 03f49957 Paolo Bonzini
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
109 5b6dd868 Blue Swirl
#else
110 5b6dd868 Blue Swirl
#define V_L1_BITS  V_L1_BITS_REM
111 5b6dd868 Blue Swirl
#endif
112 5b6dd868 Blue Swirl
113 5b6dd868 Blue Swirl
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
114 5b6dd868 Blue Swirl
115 5b6dd868 Blue Swirl
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
116 5b6dd868 Blue Swirl
117 5b6dd868 Blue Swirl
uintptr_t qemu_real_host_page_size;
118 5b6dd868 Blue Swirl
uintptr_t qemu_host_page_size;
119 5b6dd868 Blue Swirl
uintptr_t qemu_host_page_mask;
120 5b6dd868 Blue Swirl
121 5b6dd868 Blue Swirl
/* This is a multi-level map on the virtual address space.
122 5b6dd868 Blue Swirl
   The bottom level has pointers to PageDesc.  */
123 5b6dd868 Blue Swirl
static void *l1_map[V_L1_SIZE];
124 5b6dd868 Blue Swirl
125 57fec1fe bellard
/* code generation context */
126 57fec1fe bellard
TCGContext tcg_ctx;
127 d19893da bellard
128 5b6dd868 Blue Swirl
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
129 5b6dd868 Blue Swirl
                         tb_page_addr_t phys_page2);
130 a8a826a3 Blue Swirl
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
131 5b6dd868 Blue Swirl
132 57fec1fe bellard
void cpu_gen_init(void)
133 57fec1fe bellard
{
134 57fec1fe bellard
    tcg_context_init(&tcg_ctx); 
135 57fec1fe bellard
}
136 57fec1fe bellard
137 d19893da bellard
/* return non zero if the very first instruction is invalid so that
138 5fafdf24 ths
   the virtual CPU can trigger an exception.
139 d19893da bellard

140 d19893da bellard
   '*gen_code_size_ptr' contains the size of the generated code (host
141 d19893da bellard
   code).
142 d19893da bellard
*/
143 9349b4f9 Andreas Fรคrber
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    /* Reset the op buffers in preparation for a new translation.  */
    tcg_func_start(s);

    /* Front end: guest instructions -> TCG intermediate ops.  */
    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    /* 0xffff marks the chained-jump slots as "not yet linked";
       tb_link_page()/tb chaining fills them in later.  */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    /* Backend patches the jump instructions in place.  */
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    /* Backend loads the target address indirectly from tb_next[].  */
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    /* Account front-end time, then start the back-end clock.  The order
       of these clock reads is deliberate; do not reorder.  */
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    /* Back end: TCG ops -> host machine code in gen_code_buf.  */
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
197 d19893da bellard
198 5fafdf24 ths
/* The cpu state corresponding to 'searched_pc' is restored.
199 d19893da bellard
 */
200 a8a826a3 Blue Swirl
/* Restore the guest CPU state corresponding to the host PC 'searched_pc'
   inside the already-translated block 'tb'.  Works by re-translating the
   block with PC tracking enabled and locating the op that produced the
   host code at searched_pc.  Returns 0 on success, -1 if searched_pc does
   not map to an instruction of this TB.  */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    /* Re-translate, recording per-op guest PC info (gen_opc_* arrays).  */
    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr)
        return -1;

    /* Mirror the jump-slot setup used by cpu_gen_code() so the backend
       regenerates byte-identical host code.  */
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    /* Regenerate host code, stopping at searched_pc; j is the op index.  */
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0)
        return -1;
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    /* Give back the cycles of the instructions not yet executed.  */
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    /* Target-specific hook: copy the recorded PC/flags into env.  */
    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
254 5b6dd868 Blue Swirl
255 a8a826a3 Blue Swirl
/* Try to restore the guest CPU state for the TB containing the host
   return address 'retaddr'.  Returns true if a TB was found and the
   state was restored, false otherwise.  */
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb = tb_find_pc(retaddr);

    if (!tb) {
        return false;
    }
    cpu_restore_state_from_tb(tb, env, retaddr);
    return true;
}
266 a8a826a3 Blue Swirl
267 5b6dd868 Blue Swirl
#ifdef _WIN32
/* Make [addr, addr + size) executable (and readable/writable).  */
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make every page overlapping [addr, addr + size) executable (and
   readable/writable).  The range is widened to page boundaries because
   mprotect() operates on whole pages.  */
static inline void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long first = (unsigned long)addr & ~(page_size - 1);
    unsigned long last = ((unsigned long)addr + size + page_size - 1)
                         & ~(page_size - 1);

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
291 5b6dd868 Blue Swirl
292 47c16ed5 Alexey Kardashevskiy
void page_size_init(void)
293 5b6dd868 Blue Swirl
{
294 5b6dd868 Blue Swirl
    /* NOTE: we can always suppose that qemu_host_page_size >=
295 5b6dd868 Blue Swirl
       TARGET_PAGE_SIZE */
296 5b6dd868 Blue Swirl
#ifdef _WIN32
297 47c16ed5 Alexey Kardashevskiy
    SYSTEM_INFO system_info;
298 5b6dd868 Blue Swirl
299 47c16ed5 Alexey Kardashevskiy
    GetSystemInfo(&system_info);
300 47c16ed5 Alexey Kardashevskiy
    qemu_real_host_page_size = system_info.dwPageSize;
301 5b6dd868 Blue Swirl
#else
302 5b6dd868 Blue Swirl
    qemu_real_host_page_size = getpagesize();
303 5b6dd868 Blue Swirl
#endif
304 5b6dd868 Blue Swirl
    if (qemu_host_page_size == 0) {
305 5b6dd868 Blue Swirl
        qemu_host_page_size = qemu_real_host_page_size;
306 5b6dd868 Blue Swirl
    }
307 5b6dd868 Blue Swirl
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
308 5b6dd868 Blue Swirl
        qemu_host_page_size = TARGET_PAGE_SIZE;
309 5b6dd868 Blue Swirl
    }
310 5b6dd868 Blue Swirl
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
311 47c16ed5 Alexey Kardashevskiy
}
312 5b6dd868 Blue Swirl
313 47c16ed5 Alexey Kardashevskiy
/* Initialize page-size globals and, for BSD user-mode emulation, mark the
   host's already-mapped address ranges as PAGE_RESERVED so the guest does
   not map over them.  */
static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 7.0.104: enumerate mappings via kinfo_getvmmap().  */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* End lies outside the guest address space: only
                           reserve up to the top when the guest space is
                           not larger than what l1_map can describe.  */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat procfs maps file.  */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                /* Each line starts "start-end ..." in hex.  */
                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
381 5b6dd868 Blue Swirl
382 5b6dd868 Blue Swirl
/* Walk the multi-level l1_map to the PageDesc for page 'index'.  If
   'alloc' is non-zero, missing intermediate tables and the final PageDesc
   table are allocated (zeroed) on the way down; otherwise NULL is
   returned as soon as a level is absent.  */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *leaf;
    void **slot;
    int level;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1: the statically allocated root table.  */
    slot = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Levels 2..N-1: tables of pointers, built on demand.  */
    for (level = V_L1_SHIFT / V_L2_BITS - 1; level > 0; level--) {
        void **table = *slot;

        if (table == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(table, sizeof(void *) * V_L2_SIZE);
            *slot = table;
        }

        slot = table + ((index >> (level * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    /* Bottom level: a table of PageDesc entries.  */
    leaf = *slot;
    if (leaf == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(leaf, sizeof(PageDesc) * V_L2_SIZE);
        *slot = leaf;
    }

#undef ALLOC

    return leaf + (index & (V_L2_SIZE - 1));
}
431 5b6dd868 Blue Swirl
432 5b6dd868 Blue Swirl
/* Look up an existing PageDesc for 'index' without allocating any
   missing levels; returns NULL when the page has never been tracked.  */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
436 5b6dd868 Blue Swirl
437 5b6dd868 Blue Swirl
#if !defined(CONFIG_USER_ONLY)
438 5b6dd868 Blue Swirl
#define mmap_lock() do { } while (0)
439 5b6dd868 Blue Swirl
#define mmap_unlock() do { } while (0)
440 5b6dd868 Blue Swirl
#endif
441 5b6dd868 Blue Swirl
442 5b6dd868 Blue Swirl
#if defined(CONFIG_USER_ONLY)
443 5b6dd868 Blue Swirl
/* Currently it is not recommended to allocate big chunks of data in
444 5b6dd868 Blue Swirl
   user mode. It will change when a dedicated libc will be used.  */
445 5b6dd868 Blue Swirl
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
446 5b6dd868 Blue Swirl
   region in which the guest needs to run.  Revisit this.  */
447 5b6dd868 Blue Swirl
#define USE_STATIC_CODE_GEN_BUFFER
448 5b6dd868 Blue Swirl
#endif
449 5b6dd868 Blue Swirl
450 5b6dd868 Blue Swirl
/* ??? Should configure for this, not list operating systems here.  */
451 5b6dd868 Blue Swirl
#if (defined(__linux__) \
452 5b6dd868 Blue Swirl
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
453 5b6dd868 Blue Swirl
    || defined(__DragonFly__) || defined(__OpenBSD__) \
454 5b6dd868 Blue Swirl
    || defined(__NetBSD__))
455 5b6dd868 Blue Swirl
# define USE_MMAP
456 5b6dd868 Blue Swirl
#endif
457 5b6dd868 Blue Swirl
458 5b6dd868 Blue Swirl
/* Minimum size of the code gen buffer.  This number is randomly chosen,
459 5b6dd868 Blue Swirl
   but not so small that we can't have a fair number of TB's live.  */
460 5b6dd868 Blue Swirl
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
461 5b6dd868 Blue Swirl
462 5b6dd868 Blue Swirl
/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
463 5b6dd868 Blue Swirl
   indicated, this is constrained by the range of direct branches on the
464 5b6dd868 Blue Swirl
   host cpu, as used by the TCG implementation of goto_tb.  */
465 5b6dd868 Blue Swirl
#if defined(__x86_64__)
466 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
467 5b6dd868 Blue Swirl
#elif defined(__sparc__)
468 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
469 4a136e0a Claudio Fontana
#elif defined(__aarch64__)
470 4a136e0a Claudio Fontana
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
471 5b6dd868 Blue Swirl
#elif defined(__arm__)
472 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
473 5b6dd868 Blue Swirl
#elif defined(__s390x__)
474 5b6dd868 Blue Swirl
  /* We have a +- 4GB range on the branches; leave some slop.  */
475 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
476 5b6dd868 Blue Swirl
#else
477 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
478 5b6dd868 Blue Swirl
#endif
479 5b6dd868 Blue Swirl
480 5b6dd868 Blue Swirl
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
481 5b6dd868 Blue Swirl
482 5b6dd868 Blue Swirl
#define DEFAULT_CODE_GEN_BUFFER_SIZE \
483 5b6dd868 Blue Swirl
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
484 5b6dd868 Blue Swirl
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
485 5b6dd868 Blue Swirl
486 5b6dd868 Blue Swirl
static inline size_t size_code_gen_buffer(size_t tb_size)
487 5b6dd868 Blue Swirl
{
488 5b6dd868 Blue Swirl
    /* Size the buffer.  */
489 5b6dd868 Blue Swirl
    if (tb_size == 0) {
490 5b6dd868 Blue Swirl
#ifdef USE_STATIC_CODE_GEN_BUFFER
491 5b6dd868 Blue Swirl
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
492 5b6dd868 Blue Swirl
#else
493 5b6dd868 Blue Swirl
        /* ??? Needs adjustments.  */
494 5b6dd868 Blue Swirl
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
495 5b6dd868 Blue Swirl
           static buffer, we could size this on RESERVED_VA, on the text
496 5b6dd868 Blue Swirl
           segment size of the executable, or continue to use the default.  */
497 5b6dd868 Blue Swirl
        tb_size = (unsigned long)(ram_size / 4);
498 5b6dd868 Blue Swirl
#endif
499 5b6dd868 Blue Swirl
    }
500 5b6dd868 Blue Swirl
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
501 5b6dd868 Blue Swirl
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
502 5b6dd868 Blue Swirl
    }
503 5b6dd868 Blue Swirl
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
504 5b6dd868 Blue Swirl
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
505 5b6dd868 Blue Swirl
    }
506 0b0d3320 Evgeny Voevodin
    tcg_ctx.code_gen_buffer_size = tb_size;
507 5b6dd868 Blue Swirl
    return tb_size;
508 5b6dd868 Blue Swirl
}
509 5b6dd868 Blue Swirl
510 5b6dd868 Blue Swirl
#ifdef USE_STATIC_CODE_GEN_BUFFER
/* User-mode: the buffer lives in the BSS so it is near the executable,
   keeping direct branches in range.  */
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

/* Return the code generation buffer, made executable.  Returns NULL only
   in the g_malloc variant below; the other variants cannot fail that way.  */
static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
/* Fallback: heap-allocate and flip the page protections afterwards.  */
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
563 5b6dd868 Blue Swirl
564 5b6dd868 Blue Swirl
/* Allocate the code generation buffer and the TranslationBlock array,
   sizing both from 'tb_size' (0 = default).  Exits the process on
   allocation failure.  */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Ask the kernel for huge pages; purely advisory.  */
    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
            QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    /* Reserve slack so a TB that starts near the end cannot overflow the
       buffer while being generated.  */
    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
592 5b6dd868 Blue Swirl
593 5b6dd868 Blue Swirl
/* Must be called before using the QEMU cpus. 'tb_size' is the size
594 5b6dd868 Blue Swirl
   (in bytes) allocated to the translation buffer. Zero means default
595 5b6dd868 Blue Swirl
   size. */
596 5b6dd868 Blue Swirl
void tcg_exec_init(unsigned long tb_size)
{
    /* Order matters: the context must exist before the buffer is sized,
       and code_gen_ptr must point at the freshly allocated buffer.  */
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* Let debuggers/profilers know about the JIT region.  */
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
609 5b6dd868 Blue Swirl
610 5b6dd868 Blue Swirl
bool tcg_enabled(void)
611 5b6dd868 Blue Swirl
{
612 0b0d3320 Evgeny Voevodin
    return tcg_ctx.code_gen_buffer != NULL;
613 5b6dd868 Blue Swirl
}
614 5b6dd868 Blue Swirl
615 5b6dd868 Blue Swirl
/* Allocate a new translation block. Flush the translation buffer if
616 5b6dd868 Blue Swirl
   too many translation blocks or too much generated code. */
617 5b6dd868 Blue Swirl
static TranslationBlock *tb_alloc(target_ulong pc)
618 5b6dd868 Blue Swirl
{
619 5b6dd868 Blue Swirl
    TranslationBlock *tb;
620 5b6dd868 Blue Swirl
621 5e5f07e0 Evgeny Voevodin
    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
622 0b0d3320 Evgeny Voevodin
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
623 0b0d3320 Evgeny Voevodin
         tcg_ctx.code_gen_buffer_max_size) {
624 5b6dd868 Blue Swirl
        return NULL;
625 5b6dd868 Blue Swirl
    }
626 5e5f07e0 Evgeny Voevodin
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
627 5b6dd868 Blue Swirl
    tb->pc = pc;
628 5b6dd868 Blue Swirl
    tb->cflags = 0;
629 5b6dd868 Blue Swirl
    return tb;
630 5b6dd868 Blue Swirl
}
631 5b6dd868 Blue Swirl
632 5b6dd868 Blue Swirl
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       We only reclaim space when the TB being freed happens to be the
       most recently allocated one; the hard cases are ignored. */
    int last = tcg_ctx.tb_ctx.nb_tbs - 1;

    if (last >= 0 && tb == &tcg_ctx.tb_ctx.tbs[last]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs = last;
    }
}
643 5b6dd868 Blue Swirl
644 5b6dd868 Blue Swirl
static inline void invalidate_page_bitmap(PageDesc *p)
{
    /* Drop the page's SMC code bitmap (if any) and reset the
       write counter that triggers its construction. */
    if (p->code_bitmap != NULL) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
652 5b6dd868 Blue Swirl
653 5b6dd868 Blue Swirl
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
654 5b6dd868 Blue Swirl
static void page_flush_tb_1(int level, void **lp)
655 5b6dd868 Blue Swirl
{
656 5b6dd868 Blue Swirl
    int i;
657 5b6dd868 Blue Swirl
658 5b6dd868 Blue Swirl
    if (*lp == NULL) {
659 5b6dd868 Blue Swirl
        return;
660 5b6dd868 Blue Swirl
    }
661 5b6dd868 Blue Swirl
    if (level == 0) {
662 5b6dd868 Blue Swirl
        PageDesc *pd = *lp;
663 5b6dd868 Blue Swirl
664 03f49957 Paolo Bonzini
        for (i = 0; i < V_L2_SIZE; ++i) {
665 5b6dd868 Blue Swirl
            pd[i].first_tb = NULL;
666 5b6dd868 Blue Swirl
            invalidate_page_bitmap(pd + i);
667 5b6dd868 Blue Swirl
        }
668 5b6dd868 Blue Swirl
    } else {
669 5b6dd868 Blue Swirl
        void **pp = *lp;
670 5b6dd868 Blue Swirl
671 03f49957 Paolo Bonzini
        for (i = 0; i < V_L2_SIZE; ++i) {
672 5b6dd868 Blue Swirl
            page_flush_tb_1(level - 1, pp + i);
673 5b6dd868 Blue Swirl
        }
674 5b6dd868 Blue Swirl
    }
675 5b6dd868 Blue Swirl
}
676 5b6dd868 Blue Swirl
677 5b6dd868 Blue Swirl
static void page_flush_tb(void)
678 5b6dd868 Blue Swirl
{
679 5b6dd868 Blue Swirl
    int i;
680 5b6dd868 Blue Swirl
681 5b6dd868 Blue Swirl
    for (i = 0; i < V_L1_SIZE; i++) {
682 03f49957 Paolo Bonzini
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
683 5b6dd868 Blue Swirl
    }
684 5b6dd868 Blue Swirl
}
685 5b6dd868 Blue Swirl
686 5b6dd868 Blue Swirl
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    /* Sanity check: the generation pointer must still lie within the
       code buffer, otherwise something already scribbled past the end. */
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    /* Every CPU's TB lookup cache now refers to invalidated TBs. */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
    }

    /* Clear the physical-PC hash table and the per-page TB lists. */
    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    /* Restart code generation from the beginning of the buffer. */
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
719 5b6dd868 Blue Swirl
720 5b6dd868 Blue Swirl
#ifdef DEBUG_TB_CHECK
721 5b6dd868 Blue Swirl
722 5b6dd868 Blue Swirl
/* Debug check: report any TB that still intersects the given virtual
   page after an invalidation.  Only built under DEBUG_TB_CHECK. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        /* BUG FIX: the hash table lives in tcg_ctx.tb_ctx; the bare
           'tb_ctx' identifier does not exist, so this function failed
           to compile whenever DEBUG_TB_CHECK was enabled. */
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            /* Overlap test: TB range [pc, pc+size) vs the page. */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
739 5b6dd868 Blue Swirl
740 5b6dd868 Blue Swirl
/* verify that all the pages have correct rights for code */
741 5b6dd868 Blue Swirl
static void tb_page_check(void)
742 5b6dd868 Blue Swirl
{
743 5b6dd868 Blue Swirl
    TranslationBlock *tb;
744 5b6dd868 Blue Swirl
    int i, flags1, flags2;
745 5b6dd868 Blue Swirl
746 5b6dd868 Blue Swirl
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
747 5e5f07e0 Evgeny Voevodin
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
748 5e5f07e0 Evgeny Voevodin
                tb = tb->phys_hash_next) {
749 5b6dd868 Blue Swirl
            flags1 = page_get_flags(tb->pc);
750 5b6dd868 Blue Swirl
            flags2 = page_get_flags(tb->pc + tb->size - 1);
751 5b6dd868 Blue Swirl
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
752 5b6dd868 Blue Swirl
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
753 5b6dd868 Blue Swirl
                       (long)tb->pc, tb->size, flags1, flags2);
754 5b6dd868 Blue Swirl
            }
755 5b6dd868 Blue Swirl
        }
756 5b6dd868 Blue Swirl
    }
757 5b6dd868 Blue Swirl
}
758 5b6dd868 Blue Swirl
759 5b6dd868 Blue Swirl
#endif
760 5b6dd868 Blue Swirl
761 0c884d16 ้™ณ้Ÿ‹ไปป (Wei-Ren Chen)
/* Unlink 'tb' from the singly-linked phys-hash chain rooted at *ptb.
   The TB must be present in the chain. */
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    while (*ptb != tb) {
        ptb = &(*ptb)->phys_hash_next;
    }
    *ptb = tb->phys_hash_next;
}
774 5b6dd868 Blue Swirl
775 5b6dd868 Blue Swirl
/* Unlink 'tb' from a per-page TB list.  The page_next pointers carry
   the page index (0 or 1) in their low two bits, so each link must be
   untagged before it can be compared or followed. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *cur;
    unsigned int slot;

    for (;;) {
        cur = *ptb;
        slot = (uintptr_t)cur & 3;
        cur = (TranslationBlock *)((uintptr_t)cur & ~3);
        if (cur == tb) {
            *ptb = cur->page_next[slot];
            return;
        }
        ptb = &cur->page_next[slot];
    }
}
791 5b6dd868 Blue Swirl
792 5b6dd868 Blue Swirl
/* Remove TB 'tb' from the circular list of TBs chained through its
   jump slot 'n'.  List links are tagged pointers: the low 2 bits hold
   the jump-slot number, with tag 2 marking the list head (jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            /* Untag: low 2 bits select which link of tb1 we came through. */
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                /* Tag 2 is the list head: continue via jmp_first. */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
820 5b6dd868 Blue Swirl
821 5b6dd868 Blue Swirl
/* reset the jump entry 'n' of a TB so that it is not chained to
822 5b6dd868 Blue Swirl
   another TB */
823 5b6dd868 Blue Swirl
static inline void tb_reset_jump(TranslationBlock *tb, int n)
824 5b6dd868 Blue Swirl
{
825 5b6dd868 Blue Swirl
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
826 5b6dd868 Blue Swirl
}
827 5b6dd868 Blue Swirl
828 0c884d16 ้™ณ้Ÿ‹ไปป (Wei-Ren Chen)
/* invalidate one TB */
/* Removes 'tb' from every structure that can reach it: the phys-PC
   hash table, the per-page TB lists, each CPU's tb_jmp_cache, and the
   jump-chaining lists.  'page_addr' names the page the caller is
   already processing itself (or -1), whose list is left alone. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* A TB may span two pages; handle the second page the same way. */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        /* Links are tagged pointers; tag 2 marks the list head. */
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* Redirect the jump back at tb1's own code, then unchain. */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
887 5b6dd868 Blue Swirl
888 5b6dd868 Blue Swirl
/* Set 'len' consecutive bits in bitmap 'tab' starting at bit index
   'start'.  Bit i lives in tab[i >> 3] at position (i & 7). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    int end = start + len;

    for (bit = start; bit < end; bit++) {
        tab[bit >> 3] |= (uint8_t)(1u << (bit & 7));
    }
}
914 5b6dd868 Blue Swirl
915 5b6dd868 Blue Swirl
/* Build the SMC code bitmap for page 'p': one bit per byte of the
   page that is covered by some TB on this page's list. */
static void build_page_bitmap(PageDesc *p)
{
    TranslationBlock *tb;
    int n, bm_start, bm_end;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    for (tb = p->first_tb; tb != NULL; tb = tb->page_next[n]) {
        /* Untag the list pointer; n says which page of the TB this is. */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* First page: clamp the end to the page size; tb_end may
               point past the page, which is not a problem. */
            bm_start = tb->pc & ~TARGET_PAGE_MASK;
            bm_end = bm_start + tb->size;
            if (bm_end > TARGET_PAGE_SIZE) {
                bm_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* Second page: the TB covers its beginning. */
            bm_start = 0;
            bm_end = (tb->pc + tb->size) & ~TARGET_PAGE_MASK;
        }
        set_bits(p->code_bitmap, bm_start, bm_end - bm_start);
    }
}
943 5b6dd868 Blue Swirl
944 5b6dd868 Blue Swirl
/* Generate a new TB for guest code at (pc, cs_base, flags) and link it
   into the lookup structures.  May trigger a full tb_flush() if the TB
   pool or the code buffer is exhausted; always succeeds afterwards. */
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    /* The TB's host code goes at the current generation pointer. */
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* Advance the pointer past the generated code, rounded up to
       CODE_GEN_ALIGN. */
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
982 5b6dd868 Blue Swirl
983 5b6dd868 Blue Swirl
/*
984 5b6dd868 Blue Swirl
 * Invalidate all TBs which intersect with the target physical address range
985 5b6dd868 Blue Swirl
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
986 5b6dd868 Blue Swirl
 * 'is_cpu_write_access' should be true if called from a real cpu write
987 5b6dd868 Blue Swirl
 * access: the virtual CPU will exit the current TB if code is modified inside
988 5b6dd868 Blue Swirl
 * this TB.
989 5b6dd868 Blue Swirl
 */
990 5b6dd868 Blue Swirl
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
991 5b6dd868 Blue Swirl
                              int is_cpu_write_access)
992 5b6dd868 Blue Swirl
{
993 5b6dd868 Blue Swirl
    while (start < end) {
994 5b6dd868 Blue Swirl
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
995 5b6dd868 Blue Swirl
        start &= TARGET_PAGE_MASK;
996 5b6dd868 Blue Swirl
        start += TARGET_PAGE_SIZE;
997 5b6dd868 Blue Swirl
    }
998 5b6dd868 Blue Swirl
}
999 5b6dd868 Blue Swirl
1000 5b6dd868 Blue Swirl
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* Only look for the currently-executing TB on a real CPU write. */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    /* After enough write faults on this page, build the code bitmap so
       future writes can be filtered by tb_invalidate_phys_page_fast(). */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* Untag the per-page list pointer (low 2 bits = page index). */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1122 5b6dd868 Blue Swirl
1123 5b6dd868 Blue Swirl
/* len must be <= 8 and start must be a multiple of len */
1124 5b6dd868 Blue Swirl
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1125 5b6dd868 Blue Swirl
{
1126 5b6dd868 Blue Swirl
    PageDesc *p;
1127 5b6dd868 Blue Swirl
    int offset, b;
1128 5b6dd868 Blue Swirl
1129 5b6dd868 Blue Swirl
#if 0
1130 5b6dd868 Blue Swirl
    if (1) {
1131 5b6dd868 Blue Swirl
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1132 5b6dd868 Blue Swirl
                  cpu_single_env->mem_io_vaddr, len,
1133 5b6dd868 Blue Swirl
                  cpu_single_env->eip,
1134 5b6dd868 Blue Swirl
                  cpu_single_env->eip +
1135 5b6dd868 Blue Swirl
                  (intptr_t)cpu_single_env->segs[R_CS].base);
1136 5b6dd868 Blue Swirl
    }
1137 5b6dd868 Blue Swirl
#endif
1138 5b6dd868 Blue Swirl
    p = page_find(start >> TARGET_PAGE_BITS);
1139 5b6dd868 Blue Swirl
    if (!p) {
1140 5b6dd868 Blue Swirl
        return;
1141 5b6dd868 Blue Swirl
    }
1142 5b6dd868 Blue Swirl
    if (p->code_bitmap) {
1143 5b6dd868 Blue Swirl
        offset = start & ~TARGET_PAGE_MASK;
1144 5b6dd868 Blue Swirl
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1145 5b6dd868 Blue Swirl
        if (b & ((1 << len) - 1)) {
1146 5b6dd868 Blue Swirl
            goto do_invalidate;
1147 5b6dd868 Blue Swirl
        }
1148 5b6dd868 Blue Swirl
    } else {
1149 5b6dd868 Blue Swirl
    do_invalidate:
1150 5b6dd868 Blue Swirl
        tb_invalidate_phys_page_range(start, start + len, 1);
1151 5b6dd868 Blue Swirl
    }
1152 5b6dd868 Blue Swirl
}
1153 5b6dd868 Blue Swirl
1154 5b6dd868 Blue Swirl
#if !defined(CONFIG_SOFTMMU)
1155 5b6dd868 Blue Swirl
/* Invalidate every TB overlapping the guest page containing 'addr'.
 *
 * User-mode only: called from the write-fault path when a store hits a
 * page holding translated code.  'pc' is the host PC of the faulting
 * write (0 if unknown), 'puc' the signal context used to resume, and
 * 'locked' says whether the mmap lock is held and must be dropped
 * before resuming execution.
 */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int slot;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;                 /* no code on this page: nothing to do */
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* Locate the TB we were executing when the write faulted.  */
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    /* Walk the per-page TB list; the low two bits of each link encode
       which of the TB's (up to two) pages the link belongs to.  */
    while (tb != NULL) {
        slot = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop its
               execution. We could be more precise by checking that the
               modification is after the current PC, but it would
               require a specialized function to partially restore the
               CPU state.  */
            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[slot];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Regenerate a block containing just the instruction that
           modified the memory, so it cannot invalidate itself, then
           restart execution.  */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(env, puc);   /* does not return */
    }
#endif
}
1222 5b6dd868 Blue Swirl
#endif
1223 5b6dd868 Blue Swirl
1224 5b6dd868 Blue Swirl
/* add the tb in the target page and protect it if necessary */
1225 5b6dd868 Blue Swirl
static inline void tb_alloc_page(TranslationBlock *tb,
1226 5b6dd868 Blue Swirl
                                 unsigned int n, tb_page_addr_t page_addr)
1227 5b6dd868 Blue Swirl
{
1228 5b6dd868 Blue Swirl
    PageDesc *p;
1229 5b6dd868 Blue Swirl
#ifndef CONFIG_USER_ONLY
1230 5b6dd868 Blue Swirl
    bool page_already_protected;
1231 5b6dd868 Blue Swirl
#endif
1232 5b6dd868 Blue Swirl
1233 5b6dd868 Blue Swirl
    tb->page_addr[n] = page_addr;
1234 5b6dd868 Blue Swirl
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1235 5b6dd868 Blue Swirl
    tb->page_next[n] = p->first_tb;
1236 5b6dd868 Blue Swirl
#ifndef CONFIG_USER_ONLY
1237 5b6dd868 Blue Swirl
    page_already_protected = p->first_tb != NULL;
1238 5b6dd868 Blue Swirl
#endif
1239 5b6dd868 Blue Swirl
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1240 5b6dd868 Blue Swirl
    invalidate_page_bitmap(p);
1241 5b6dd868 Blue Swirl
1242 5b6dd868 Blue Swirl
#if defined(TARGET_HAS_SMC) || 1
1243 5b6dd868 Blue Swirl
1244 5b6dd868 Blue Swirl
#if defined(CONFIG_USER_ONLY)
1245 5b6dd868 Blue Swirl
    if (p->flags & PAGE_WRITE) {
1246 5b6dd868 Blue Swirl
        target_ulong addr;
1247 5b6dd868 Blue Swirl
        PageDesc *p2;
1248 5b6dd868 Blue Swirl
        int prot;
1249 5b6dd868 Blue Swirl
1250 5b6dd868 Blue Swirl
        /* force the host page as non writable (writes will have a
1251 5b6dd868 Blue Swirl
           page fault + mprotect overhead) */
1252 5b6dd868 Blue Swirl
        page_addr &= qemu_host_page_mask;
1253 5b6dd868 Blue Swirl
        prot = 0;
1254 5b6dd868 Blue Swirl
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1255 5b6dd868 Blue Swirl
            addr += TARGET_PAGE_SIZE) {
1256 5b6dd868 Blue Swirl
1257 5b6dd868 Blue Swirl
            p2 = page_find(addr >> TARGET_PAGE_BITS);
1258 5b6dd868 Blue Swirl
            if (!p2) {
1259 5b6dd868 Blue Swirl
                continue;
1260 5b6dd868 Blue Swirl
            }
1261 5b6dd868 Blue Swirl
            prot |= p2->flags;
1262 5b6dd868 Blue Swirl
            p2->flags &= ~PAGE_WRITE;
1263 5b6dd868 Blue Swirl
          }
1264 5b6dd868 Blue Swirl
        mprotect(g2h(page_addr), qemu_host_page_size,
1265 5b6dd868 Blue Swirl
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1266 5b6dd868 Blue Swirl
#ifdef DEBUG_TB_INVALIDATE
1267 5b6dd868 Blue Swirl
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1268 5b6dd868 Blue Swirl
               page_addr);
1269 5b6dd868 Blue Swirl
#endif
1270 5b6dd868 Blue Swirl
    }
1271 5b6dd868 Blue Swirl
#else
1272 5b6dd868 Blue Swirl
    /* if some code is already present, then the pages are already
1273 5b6dd868 Blue Swirl
       protected. So we handle the case where only the first TB is
1274 5b6dd868 Blue Swirl
       allocated in a physical page */
1275 5b6dd868 Blue Swirl
    if (!page_already_protected) {
1276 5b6dd868 Blue Swirl
        tlb_protect_code(page_addr);
1277 5b6dd868 Blue Swirl
    }
1278 5b6dd868 Blue Swirl
#endif
1279 5b6dd868 Blue Swirl
1280 5b6dd868 Blue Swirl
#endif /* TARGET_HAS_SMC */
1281 5b6dd868 Blue Swirl
}
1282 5b6dd868 Blue Swirl
1283 5b6dd868 Blue Swirl
/* add a new TB and link it to the physical page tables. phys_page2 is
1284 5b6dd868 Blue Swirl
   (-1) to indicate that only one page contains the TB. */
1285 5b6dd868 Blue Swirl
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1286 5b6dd868 Blue Swirl
                         tb_page_addr_t phys_page2)
1287 5b6dd868 Blue Swirl
{
1288 5b6dd868 Blue Swirl
    unsigned int h;
1289 5b6dd868 Blue Swirl
    TranslationBlock **ptb;
1290 5b6dd868 Blue Swirl
1291 5b6dd868 Blue Swirl
    /* Grab the mmap lock to stop another thread invalidating this TB
1292 5b6dd868 Blue Swirl
       before we are done.  */
1293 5b6dd868 Blue Swirl
    mmap_lock();
1294 5b6dd868 Blue Swirl
    /* add in the physical hash table */
1295 5b6dd868 Blue Swirl
    h = tb_phys_hash_func(phys_pc);
1296 5e5f07e0 Evgeny Voevodin
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
1297 5b6dd868 Blue Swirl
    tb->phys_hash_next = *ptb;
1298 5b6dd868 Blue Swirl
    *ptb = tb;
1299 5b6dd868 Blue Swirl
1300 5b6dd868 Blue Swirl
    /* add in the page list */
1301 5b6dd868 Blue Swirl
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1302 5b6dd868 Blue Swirl
    if (phys_page2 != -1) {
1303 5b6dd868 Blue Swirl
        tb_alloc_page(tb, 1, phys_page2);
1304 5b6dd868 Blue Swirl
    } else {
1305 5b6dd868 Blue Swirl
        tb->page_addr[1] = -1;
1306 5b6dd868 Blue Swirl
    }
1307 5b6dd868 Blue Swirl
1308 5b6dd868 Blue Swirl
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1309 5b6dd868 Blue Swirl
    tb->jmp_next[0] = NULL;
1310 5b6dd868 Blue Swirl
    tb->jmp_next[1] = NULL;
1311 5b6dd868 Blue Swirl
1312 5b6dd868 Blue Swirl
    /* init original jump addresses */
1313 5b6dd868 Blue Swirl
    if (tb->tb_next_offset[0] != 0xffff) {
1314 5b6dd868 Blue Swirl
        tb_reset_jump(tb, 0);
1315 5b6dd868 Blue Swirl
    }
1316 5b6dd868 Blue Swirl
    if (tb->tb_next_offset[1] != 0xffff) {
1317 5b6dd868 Blue Swirl
        tb_reset_jump(tb, 1);
1318 5b6dd868 Blue Swirl
    }
1319 5b6dd868 Blue Swirl
1320 5b6dd868 Blue Swirl
#ifdef DEBUG_TB_CHECK
1321 5b6dd868 Blue Swirl
    tb_page_check();
1322 5b6dd868 Blue Swirl
#endif
1323 5b6dd868 Blue Swirl
    mmap_unlock();
1324 5b6dd868 Blue Swirl
}
1325 5b6dd868 Blue Swirl
1326 5b6dd868 Blue Swirl
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1327 5b6dd868 Blue Swirl
   tb[1].tc_ptr. Return NULL if not found */
1328 a8a826a3 Blue Swirl
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1329 5b6dd868 Blue Swirl
{
1330 5b6dd868 Blue Swirl
    int m_min, m_max, m;
1331 5b6dd868 Blue Swirl
    uintptr_t v;
1332 5b6dd868 Blue Swirl
    TranslationBlock *tb;
1333 5b6dd868 Blue Swirl
1334 5e5f07e0 Evgeny Voevodin
    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1335 5b6dd868 Blue Swirl
        return NULL;
1336 5b6dd868 Blue Swirl
    }
1337 0b0d3320 Evgeny Voevodin
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1338 0b0d3320 Evgeny Voevodin
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1339 5b6dd868 Blue Swirl
        return NULL;
1340 5b6dd868 Blue Swirl
    }
1341 5b6dd868 Blue Swirl
    /* binary search (cf Knuth) */
1342 5b6dd868 Blue Swirl
    m_min = 0;
1343 5e5f07e0 Evgeny Voevodin
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1344 5b6dd868 Blue Swirl
    while (m_min <= m_max) {
1345 5b6dd868 Blue Swirl
        m = (m_min + m_max) >> 1;
1346 5e5f07e0 Evgeny Voevodin
        tb = &tcg_ctx.tb_ctx.tbs[m];
1347 5b6dd868 Blue Swirl
        v = (uintptr_t)tb->tc_ptr;
1348 5b6dd868 Blue Swirl
        if (v == tc_ptr) {
1349 5b6dd868 Blue Swirl
            return tb;
1350 5b6dd868 Blue Swirl
        } else if (tc_ptr < v) {
1351 5b6dd868 Blue Swirl
            m_max = m - 1;
1352 5b6dd868 Blue Swirl
        } else {
1353 5b6dd868 Blue Swirl
            m_min = m + 1;
1354 5b6dd868 Blue Swirl
        }
1355 5b6dd868 Blue Swirl
    }
1356 5e5f07e0 Evgeny Voevodin
    return &tcg_ctx.tb_ctx.tbs[m_max];
1357 5b6dd868 Blue Swirl
}
1358 5b6dd868 Blue Swirl
1359 5b6dd868 Blue Swirl
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
1360 29d8ec7b Edgar E. Iglesias
/* Invalidate any translated code for the guest-physical address 'addr'
   in address space 'as'.  Only RAM- or ROM-device-backed regions can
   hold translated code; anything else is ignored.  */
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
1375 5b6dd868 Blue Swirl
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
1376 5b6dd868 Blue Swirl
1377 5b6dd868 Blue Swirl
/* A watchpoint fired inside generated code: restore the CPU state from
   the TB we were executing, then discard that TB so the access is
   retranslated with watchpoint handling applied.  */
void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb = tb_find_pc(env->mem_io_pc);

    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
1389 5b6dd868 Blue Swirl
1390 5b6dd868 Blue Swirl
#ifndef CONFIG_USER_ONLY
1391 5b6dd868 Blue Swirl
/* mask must never be zero, except for A20 change call */
1392 c3affe56 Andreas Fรคrber
static void tcg_handle_interrupt(CPUState *cpu, int mask)
1393 5b6dd868 Blue Swirl
{
1394 c3affe56 Andreas Fรคrber
    CPUArchState *env = cpu->env_ptr;
1395 5b6dd868 Blue Swirl
    int old_mask;
1396 5b6dd868 Blue Swirl
1397 259186a7 Andreas Fรคrber
    old_mask = cpu->interrupt_request;
1398 259186a7 Andreas Fรคrber
    cpu->interrupt_request |= mask;
1399 5b6dd868 Blue Swirl
1400 5b6dd868 Blue Swirl
    /*
1401 5b6dd868 Blue Swirl
     * If called from iothread context, wake the target cpu in
1402 5b6dd868 Blue Swirl
     * case its halted.
1403 5b6dd868 Blue Swirl
     */
1404 5b6dd868 Blue Swirl
    if (!qemu_cpu_is_self(cpu)) {
1405 5b6dd868 Blue Swirl
        qemu_cpu_kick(cpu);
1406 5b6dd868 Blue Swirl
        return;
1407 5b6dd868 Blue Swirl
    }
1408 5b6dd868 Blue Swirl
1409 5b6dd868 Blue Swirl
    if (use_icount) {
1410 5b6dd868 Blue Swirl
        env->icount_decr.u16.high = 0xffff;
1411 5b6dd868 Blue Swirl
        if (!can_do_io(env)
1412 5b6dd868 Blue Swirl
            && (mask & ~old_mask) != 0) {
1413 5b6dd868 Blue Swirl
            cpu_abort(env, "Raised interrupt while not in I/O function");
1414 5b6dd868 Blue Swirl
        }
1415 5b6dd868 Blue Swirl
    } else {
1416 378df4b2 Peter Maydell
        cpu->tcg_exit_req = 1;
1417 5b6dd868 Blue Swirl
    }
1418 5b6dd868 Blue Swirl
}
1419 5b6dd868 Blue Swirl
1420 5b6dd868 Blue Swirl
/* Default interrupt delivery hook used by the TCG accelerator.  */
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1421 5b6dd868 Blue Swirl
1422 5b6dd868 Blue Swirl
/* in deterministic execution mode, instructions doing device I/Os
1423 5b6dd868 Blue Swirl
   must be at the end of the TB */
1424 5b6dd868 Blue Swirl
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
1425 5b6dd868 Blue Swirl
{
1426 5b6dd868 Blue Swirl
    TranslationBlock *tb;
1427 5b6dd868 Blue Swirl
    uint32_t n, cflags;
1428 5b6dd868 Blue Swirl
    target_ulong pc, cs_base;
1429 5b6dd868 Blue Swirl
    uint64_t flags;
1430 5b6dd868 Blue Swirl
1431 5b6dd868 Blue Swirl
    tb = tb_find_pc(retaddr);
1432 5b6dd868 Blue Swirl
    if (!tb) {
1433 5b6dd868 Blue Swirl
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
1434 5b6dd868 Blue Swirl
                  (void *)retaddr);
1435 5b6dd868 Blue Swirl
    }
1436 5b6dd868 Blue Swirl
    n = env->icount_decr.u16.low + tb->icount;
1437 a8a826a3 Blue Swirl
    cpu_restore_state_from_tb(tb, env, retaddr);
1438 5b6dd868 Blue Swirl
    /* Calculate how many instructions had been executed before the fault
1439 5b6dd868 Blue Swirl
       occurred.  */
1440 5b6dd868 Blue Swirl
    n = n - env->icount_decr.u16.low;
1441 5b6dd868 Blue Swirl
    /* Generate a new TB ending on the I/O insn.  */
1442 5b6dd868 Blue Swirl
    n++;
1443 5b6dd868 Blue Swirl
    /* On MIPS and SH, delay slot instructions can only be restarted if
1444 5b6dd868 Blue Swirl
       they were already the first instruction in the TB.  If this is not
1445 5b6dd868 Blue Swirl
       the first instruction in a TB then re-execute the preceding
1446 5b6dd868 Blue Swirl
       branch.  */
1447 5b6dd868 Blue Swirl
#if defined(TARGET_MIPS)
1448 5b6dd868 Blue Swirl
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1449 5b6dd868 Blue Swirl
        env->active_tc.PC -= 4;
1450 5b6dd868 Blue Swirl
        env->icount_decr.u16.low++;
1451 5b6dd868 Blue Swirl
        env->hflags &= ~MIPS_HFLAG_BMASK;
1452 5b6dd868 Blue Swirl
    }
1453 5b6dd868 Blue Swirl
#elif defined(TARGET_SH4)
1454 5b6dd868 Blue Swirl
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1455 5b6dd868 Blue Swirl
            && n > 1) {
1456 5b6dd868 Blue Swirl
        env->pc -= 2;
1457 5b6dd868 Blue Swirl
        env->icount_decr.u16.low++;
1458 5b6dd868 Blue Swirl
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1459 5b6dd868 Blue Swirl
    }
1460 5b6dd868 Blue Swirl
#endif
1461 5b6dd868 Blue Swirl
    /* This should never happen.  */
1462 5b6dd868 Blue Swirl
    if (n > CF_COUNT_MASK) {
1463 5b6dd868 Blue Swirl
        cpu_abort(env, "TB too big during recompile");
1464 5b6dd868 Blue Swirl
    }
1465 5b6dd868 Blue Swirl
1466 5b6dd868 Blue Swirl
    cflags = n | CF_LAST_IO;
1467 5b6dd868 Blue Swirl
    pc = tb->pc;
1468 5b6dd868 Blue Swirl
    cs_base = tb->cs_base;
1469 5b6dd868 Blue Swirl
    flags = tb->flags;
1470 5b6dd868 Blue Swirl
    tb_phys_invalidate(tb, -1);
1471 5b6dd868 Blue Swirl
    /* FIXME: In theory this could raise an exception.  In practice
1472 5b6dd868 Blue Swirl
       we have already translated the block once so it's probably ok.  */
1473 5b6dd868 Blue Swirl
    tb_gen_code(env, pc, cs_base, flags, cflags);
1474 5b6dd868 Blue Swirl
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1475 5b6dd868 Blue Swirl
       the first in the TB) then we end up generating a whole new TB and
1476 5b6dd868 Blue Swirl
       repeating the fault, which is horribly inefficient.
1477 5b6dd868 Blue Swirl
       Better would be to execute just this insn uncached, or generate a
1478 5b6dd868 Blue Swirl
       second new TB.  */
1479 5b6dd868 Blue Swirl
    cpu_resume_from_signal(env, NULL);
1480 5b6dd868 Blue Swirl
}
1481 5b6dd868 Blue Swirl
1482 5b6dd868 Blue Swirl
/* Drop tb_jmp_cache entries that could refer to TBs overlapping the
   flushed page at 'addr', including TBs that start on the preceding
   page and spill over.  */
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int idx;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    idx = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[idx], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    idx = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[idx], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1496 5b6dd868 Blue Swirl
1497 5b6dd868 Blue Swirl
/* Print translation-buffer occupancy and per-TB statistics to 'f'
   using the supplied fprintf-like callback.  */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* Accumulate per-TB statistics over every live TB.  */
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                             target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
1559 5b6dd868 Blue Swirl
1560 5b6dd868 Blue Swirl
#else /* CONFIG_USER_ONLY */
1561 5b6dd868 Blue Swirl
1562 c3affe56 Andreas Fรคrber
/* User-mode variant: record the pending interrupt and ask the TCG
   execution loop to exit at the next TB boundary.  */
void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
1567 5b6dd868 Blue Swirl
1568 5b6dd868 Blue Swirl
/*
1569 5b6dd868 Blue Swirl
 * Walks guest process memory "regions" one by one
1570 5b6dd868 Blue Swirl
 * and calls callback function 'fn' for each region.
1571 5b6dd868 Blue Swirl
 */
1572 5b6dd868 Blue Swirl
struct walk_memory_regions_data {
1573 5b6dd868 Blue Swirl
    walk_memory_regions_fn fn;
1574 5b6dd868 Blue Swirl
    void *priv;
1575 5b6dd868 Blue Swirl
    uintptr_t start;
1576 5b6dd868 Blue Swirl
    int prot;
1577 5b6dd868 Blue Swirl
};
1578 5b6dd868 Blue Swirl
1579 5b6dd868 Blue Swirl
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1580 5b6dd868 Blue Swirl
                                   abi_ulong end, int new_prot)
1581 5b6dd868 Blue Swirl
{
1582 5b6dd868 Blue Swirl
    if (data->start != -1ul) {
1583 5b6dd868 Blue Swirl
        int rc = data->fn(data->priv, data->start, end, data->prot);
1584 5b6dd868 Blue Swirl
        if (rc != 0) {
1585 5b6dd868 Blue Swirl
            return rc;
1586 5b6dd868 Blue Swirl
        }
1587 5b6dd868 Blue Swirl
    }
1588 5b6dd868 Blue Swirl
1589 5b6dd868 Blue Swirl
    data->start = (new_prot ? end : -1ul);
1590 5b6dd868 Blue Swirl
    data->prot = new_prot;
1591 5b6dd868 Blue Swirl
1592 5b6dd868 Blue Swirl
    return 0;
1593 5b6dd868 Blue Swirl
}
1594 5b6dd868 Blue Swirl
1595 5b6dd868 Blue Swirl
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1596 5b6dd868 Blue Swirl
                                 abi_ulong base, int level, void **lp)
1597 5b6dd868 Blue Swirl
{
1598 5b6dd868 Blue Swirl
    abi_ulong pa;
1599 5b6dd868 Blue Swirl
    int i, rc;
1600 5b6dd868 Blue Swirl
1601 5b6dd868 Blue Swirl
    if (*lp == NULL) {
1602 5b6dd868 Blue Swirl
        return walk_memory_regions_end(data, base, 0);
1603 5b6dd868 Blue Swirl
    }
1604 5b6dd868 Blue Swirl
1605 5b6dd868 Blue Swirl
    if (level == 0) {
1606 5b6dd868 Blue Swirl
        PageDesc *pd = *lp;
1607 5b6dd868 Blue Swirl
1608 03f49957 Paolo Bonzini
        for (i = 0; i < V_L2_SIZE; ++i) {
1609 5b6dd868 Blue Swirl
            int prot = pd[i].flags;
1610 5b6dd868 Blue Swirl
1611 5b6dd868 Blue Swirl
            pa = base | (i << TARGET_PAGE_BITS);
1612 5b6dd868 Blue Swirl
            if (prot != data->prot) {
1613 5b6dd868 Blue Swirl
                rc = walk_memory_regions_end(data, pa, prot);
1614 5b6dd868 Blue Swirl
                if (rc != 0) {
1615 5b6dd868 Blue Swirl
                    return rc;
1616 5b6dd868 Blue Swirl
                }
1617 5b6dd868 Blue Swirl
            }
1618 5b6dd868 Blue Swirl
        }
1619 5b6dd868 Blue Swirl
    } else {
1620 5b6dd868 Blue Swirl
        void **pp = *lp;
1621 5b6dd868 Blue Swirl
1622 03f49957 Paolo Bonzini
        for (i = 0; i < V_L2_SIZE; ++i) {
1623 5b6dd868 Blue Swirl
            pa = base | ((abi_ulong)i <<
1624 03f49957 Paolo Bonzini
                (TARGET_PAGE_BITS + V_L2_BITS * level));
1625 5b6dd868 Blue Swirl
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1626 5b6dd868 Blue Swirl
            if (rc != 0) {
1627 5b6dd868 Blue Swirl
                return rc;
1628 5b6dd868 Blue Swirl
            }
1629 5b6dd868 Blue Swirl
        }
1630 5b6dd868 Blue Swirl
    }
1631 5b6dd868 Blue Swirl
1632 5b6dd868 Blue Swirl
    return 0;
1633 5b6dd868 Blue Swirl
}
1634 5b6dd868 Blue Swirl
1635 5b6dd868 Blue Swirl
/* Walk all mapped guest memory, invoking 'fn(priv, start, end, prot)'
   once per maximal run of pages sharing the same protection.  Stops
   early and returns the first non-zero value 'fn' returns.  */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;      /* no region open yet */
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any.  */
    return walk_memory_regions_end(&data, 0, 0);
}
1656 5b6dd868 Blue Swirl
1657 5b6dd868 Blue Swirl
/* walk_memory_regions() callback: print one mapping line to the FILE
   handle passed via 'priv'.  Always returns 0 (continue walking).  */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
1671 5b6dd868 Blue Swirl
1672 5b6dd868 Blue Swirl
/* dump memory mappings */
1673 5b6dd868 Blue Swirl
void page_dump(FILE *f)
1674 5b6dd868 Blue Swirl
{
1675 227b8175 Stefan Weil
    const int length = sizeof(abi_ulong) * 2;
1676 227b8175 Stefan Weil
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1677 227b8175 Stefan Weil
            length, "start", length, "end", length, "size", "prot");
1678 5b6dd868 Blue Swirl
    walk_memory_regions(f, dump_region);
1679 5b6dd868 Blue Swirl
}
1680 5b6dd868 Blue Swirl
1681 5b6dd868 Blue Swirl
/* Return the PAGE_* flags of the page containing 'address', or 0 when
   the page has never been mapped.  */
int page_get_flags(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);

    return p ? p->flags : 0;
}
1691 5b6dd868 Blue Swirl
1692 5b6dd868 Blue Swirl
/* Modify the flags of a page and invalidate the code if necessary.
1693 5b6dd868 Blue Swirl
   The flag PAGE_WRITE_ORG is positioned automatically depending
1694 5b6dd868 Blue Swirl
   on PAGE_WRITE.  The mmap_lock should already be held.  */
1695 5b6dd868 Blue Swirl
void page_set_flags(target_ulong start, target_ulong end, int flags)
1696 5b6dd868 Blue Swirl
{
1697 5b6dd868 Blue Swirl
    target_ulong addr, len;
1698 5b6dd868 Blue Swirl
1699 5b6dd868 Blue Swirl
    /* This function should never be called with addresses outside the
1700 5b6dd868 Blue Swirl
       guest address space.  If this assert fires, it probably indicates
1701 5b6dd868 Blue Swirl
       a missing call to h2g_valid.  */
1702 5b6dd868 Blue Swirl
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1703 5b6dd868 Blue Swirl
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1704 5b6dd868 Blue Swirl
#endif
1705 5b6dd868 Blue Swirl
    assert(start < end);
1706 5b6dd868 Blue Swirl
1707 5b6dd868 Blue Swirl
    start = start & TARGET_PAGE_MASK;
1708 5b6dd868 Blue Swirl
    end = TARGET_PAGE_ALIGN(end);
1709 5b6dd868 Blue Swirl
1710 5b6dd868 Blue Swirl
    if (flags & PAGE_WRITE) {
1711 5b6dd868 Blue Swirl
        flags |= PAGE_WRITE_ORG;
1712 5b6dd868 Blue Swirl
    }
1713 5b6dd868 Blue Swirl
1714 5b6dd868 Blue Swirl
    for (addr = start, len = end - start;
1715 5b6dd868 Blue Swirl
         len != 0;
1716 5b6dd868 Blue Swirl
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1717 5b6dd868 Blue Swirl
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1718 5b6dd868 Blue Swirl
1719 5b6dd868 Blue Swirl
        /* If the write protection bit is set, then we invalidate
1720 5b6dd868 Blue Swirl
           the code inside.  */
1721 5b6dd868 Blue Swirl
        if (!(p->flags & PAGE_WRITE) &&
1722 5b6dd868 Blue Swirl
            (flags & PAGE_WRITE) &&
1723 5b6dd868 Blue Swirl
            p->first_tb) {
1724 d02532f0 Alexander Graf
            tb_invalidate_phys_page(addr, 0, NULL, false);
1725 5b6dd868 Blue Swirl
        }
1726 5b6dd868 Blue Swirl
        p->flags = flags;
1727 5b6dd868 Blue Swirl
    }
1728 5b6dd868 Blue Swirl
}
1729 5b6dd868 Blue Swirl
1730 5b6dd868 Blue Swirl
/* Verify that the memory range [start, start + len) has all of the
   protections given in FLAGS for the current task.  Returns 0 on
   success, -1 if any page in the range is missing a requested
   protection.  As a side effect, when write access is requested,
   pages that were made read-only to guard translated code are
   unprotected again (and their translations invalidated).  */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* Bug fix: do NOT return 0 here.  Returning inside the
               loop reported success after checking only the first
               page; every page in the range must be validated.  */
        }
    }
    return 0;
}
1785 5b6dd868 Blue Swirl
1786 5b6dd868 Blue Swirl
/* called from signal handler: invalidate the code and unprotect the
1787 5b6dd868 Blue Swirl
   page. Return TRUE if the fault was successfully handled. */
1788 5b6dd868 Blue Swirl
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
1789 5b6dd868 Blue Swirl
{
1790 5b6dd868 Blue Swirl
    unsigned int prot;
1791 5b6dd868 Blue Swirl
    PageDesc *p;
1792 5b6dd868 Blue Swirl
    target_ulong host_start, host_end, addr;
1793 5b6dd868 Blue Swirl
1794 5b6dd868 Blue Swirl
    /* Technically this isn't safe inside a signal handler.  However we
1795 5b6dd868 Blue Swirl
       know this only ever happens in a synchronous SEGV handler, so in
1796 5b6dd868 Blue Swirl
       practice it seems to be ok.  */
1797 5b6dd868 Blue Swirl
    mmap_lock();
1798 5b6dd868 Blue Swirl
1799 5b6dd868 Blue Swirl
    p = page_find(address >> TARGET_PAGE_BITS);
1800 5b6dd868 Blue Swirl
    if (!p) {
1801 5b6dd868 Blue Swirl
        mmap_unlock();
1802 5b6dd868 Blue Swirl
        return 0;
1803 5b6dd868 Blue Swirl
    }
1804 5b6dd868 Blue Swirl
1805 5b6dd868 Blue Swirl
    /* if the page was really writable, then we change its
1806 5b6dd868 Blue Swirl
       protection back to writable */
1807 5b6dd868 Blue Swirl
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
1808 5b6dd868 Blue Swirl
        host_start = address & qemu_host_page_mask;
1809 5b6dd868 Blue Swirl
        host_end = host_start + qemu_host_page_size;
1810 5b6dd868 Blue Swirl
1811 5b6dd868 Blue Swirl
        prot = 0;
1812 5b6dd868 Blue Swirl
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
1813 5b6dd868 Blue Swirl
            p = page_find(addr >> TARGET_PAGE_BITS);
1814 5b6dd868 Blue Swirl
            p->flags |= PAGE_WRITE;
1815 5b6dd868 Blue Swirl
            prot |= p->flags;
1816 5b6dd868 Blue Swirl
1817 5b6dd868 Blue Swirl
            /* and since the content will be modified, we must invalidate
1818 5b6dd868 Blue Swirl
               the corresponding translated code. */
1819 d02532f0 Alexander Graf
            tb_invalidate_phys_page(addr, pc, puc, true);
1820 5b6dd868 Blue Swirl
#ifdef DEBUG_TB_CHECK
1821 5b6dd868 Blue Swirl
            tb_invalidate_check(addr);
1822 5b6dd868 Blue Swirl
#endif
1823 5b6dd868 Blue Swirl
        }
1824 5b6dd868 Blue Swirl
        mprotect((void *)g2h(host_start), qemu_host_page_size,
1825 5b6dd868 Blue Swirl
                 prot & PAGE_BITS);
1826 5b6dd868 Blue Swirl
1827 5b6dd868 Blue Swirl
        mmap_unlock();
1828 5b6dd868 Blue Swirl
        return 1;
1829 5b6dd868 Blue Swirl
    }
1830 5b6dd868 Blue Swirl
    mmap_unlock();
1831 5b6dd868 Blue Swirl
    return 0;
1832 5b6dd868 Blue Swirl
}
1833 5b6dd868 Blue Swirl
#endif /* CONFIG_USER_ONLY */