Statistics
| Branch: | Revision:

root / translate-all.c @ 8cfd0495

History | View | Annotate | Download (53.8 kB)

1 d19893da bellard
/*
2 d19893da bellard
 *  Host code generation
3 5fafdf24 ths
 *
4 d19893da bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 d19893da bellard
 *
6 d19893da bellard
 * This library is free software; you can redistribute it and/or
7 d19893da bellard
 * modify it under the terms of the GNU Lesser General Public
8 d19893da bellard
 * License as published by the Free Software Foundation; either
9 d19893da bellard
 * version 2 of the License, or (at your option) any later version.
10 d19893da bellard
 *
11 d19893da bellard
 * This library is distributed in the hope that it will be useful,
12 d19893da bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 d19893da bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 d19893da bellard
 * Lesser General Public License for more details.
15 d19893da bellard
 *
16 d19893da bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 d19893da bellard
 */
19 5b6dd868 Blue Swirl
#ifdef _WIN32
20 5b6dd868 Blue Swirl
#include <windows.h>
21 5b6dd868 Blue Swirl
#else
22 5b6dd868 Blue Swirl
#include <sys/types.h>
23 5b6dd868 Blue Swirl
#include <sys/mman.h>
24 5b6dd868 Blue Swirl
#endif
25 d19893da bellard
#include <stdarg.h>
26 d19893da bellard
#include <stdlib.h>
27 d19893da bellard
#include <stdio.h>
28 d19893da bellard
#include <string.h>
29 d19893da bellard
#include <inttypes.h>
30 d19893da bellard
31 d19893da bellard
#include "config.h"
32 2054396a bellard
33 5b6dd868 Blue Swirl
#include "qemu-common.h"
34 af5ad107 bellard
#define NO_CPU_IO_DEFS
35 d3eead2e bellard
#include "cpu.h"
36 76cad711 Paolo Bonzini
#include "disas/disas.h"
37 57fec1fe bellard
#include "tcg.h"
38 5b6dd868 Blue Swirl
#if defined(CONFIG_USER_ONLY)
39 5b6dd868 Blue Swirl
#include "qemu.h"
40 5b6dd868 Blue Swirl
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 5b6dd868 Blue Swirl
#include <sys/param.h>
42 5b6dd868 Blue Swirl
#if __FreeBSD_version >= 700104
43 5b6dd868 Blue Swirl
#define HAVE_KINFO_GETVMMAP
44 5b6dd868 Blue Swirl
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
45 5b6dd868 Blue Swirl
#include <sys/time.h>
46 5b6dd868 Blue Swirl
#include <sys/proc.h>
47 5b6dd868 Blue Swirl
#include <machine/profile.h>
48 5b6dd868 Blue Swirl
#define _KERNEL
49 5b6dd868 Blue Swirl
#include <sys/user.h>
50 5b6dd868 Blue Swirl
#undef _KERNEL
51 5b6dd868 Blue Swirl
#undef sigqueue
52 5b6dd868 Blue Swirl
#include <libutil.h>
53 5b6dd868 Blue Swirl
#endif
54 5b6dd868 Blue Swirl
#endif
55 0bc3cd62 Paolo Bonzini
#else
56 0bc3cd62 Paolo Bonzini
#include "exec/address-spaces.h"
57 5b6dd868 Blue Swirl
#endif
58 5b6dd868 Blue Swirl
59 022c62cb Paolo Bonzini
#include "exec/cputlb.h"
60 5b6dd868 Blue Swirl
#include "translate-all.h"
61 0aa09897 Alexey Kardashevskiy
#include "qemu/timer.h"
62 5b6dd868 Blue Swirl
63 5b6dd868 Blue Swirl
//#define DEBUG_TB_INVALIDATE
64 5b6dd868 Blue Swirl
//#define DEBUG_FLUSH
65 5b6dd868 Blue Swirl
/* make various TB consistency checks */
66 5b6dd868 Blue Swirl
//#define DEBUG_TB_CHECK
67 5b6dd868 Blue Swirl
68 5b6dd868 Blue Swirl
#if !defined(CONFIG_USER_ONLY)
69 5b6dd868 Blue Swirl
/* TB consistency checks only implemented for usermode emulation.  */
70 5b6dd868 Blue Swirl
#undef DEBUG_TB_CHECK
71 5b6dd868 Blue Swirl
#endif
72 5b6dd868 Blue Swirl
73 5b6dd868 Blue Swirl
#define SMC_BITMAP_USE_THRESHOLD 10
74 5b6dd868 Blue Swirl
75 5b6dd868 Blue Swirl
typedef struct PageDesc {
76 5b6dd868 Blue Swirl
    /* list of TBs intersecting this ram page */
77 5b6dd868 Blue Swirl
    TranslationBlock *first_tb;
78 5b6dd868 Blue Swirl
    /* in order to optimize self modifying code, we count the number
79 5b6dd868 Blue Swirl
       of lookups we do to a given page to use a bitmap */
80 5b6dd868 Blue Swirl
    unsigned int code_write_count;
81 5b6dd868 Blue Swirl
    uint8_t *code_bitmap;
82 5b6dd868 Blue Swirl
#if defined(CONFIG_USER_ONLY)
83 5b6dd868 Blue Swirl
    unsigned long flags;
84 5b6dd868 Blue Swirl
#endif
85 5b6dd868 Blue Swirl
} PageDesc;
86 5b6dd868 Blue Swirl
87 5b6dd868 Blue Swirl
/* In system mode we want L1_MAP to be based on ram offsets,
88 5b6dd868 Blue Swirl
   while in user mode we want it to be based on virtual addresses.  */
89 5b6dd868 Blue Swirl
#if !defined(CONFIG_USER_ONLY)
90 5b6dd868 Blue Swirl
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
91 5b6dd868 Blue Swirl
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
92 5b6dd868 Blue Swirl
#else
93 5b6dd868 Blue Swirl
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
94 5b6dd868 Blue Swirl
#endif
95 5b6dd868 Blue Swirl
#else
96 5b6dd868 Blue Swirl
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
97 5b6dd868 Blue Swirl
#endif
98 5b6dd868 Blue Swirl
99 5b6dd868 Blue Swirl
/* The bits remaining after N lower levels of page tables.  */
100 5b6dd868 Blue Swirl
#define V_L1_BITS_REM \
101 5b6dd868 Blue Swirl
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
102 5b6dd868 Blue Swirl
103 5b6dd868 Blue Swirl
#if V_L1_BITS_REM < 4
104 5b6dd868 Blue Swirl
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
105 5b6dd868 Blue Swirl
#else
106 5b6dd868 Blue Swirl
#define V_L1_BITS  V_L1_BITS_REM
107 5b6dd868 Blue Swirl
#endif
108 5b6dd868 Blue Swirl
109 5b6dd868 Blue Swirl
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
110 5b6dd868 Blue Swirl
111 5b6dd868 Blue Swirl
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
112 5b6dd868 Blue Swirl
113 5b6dd868 Blue Swirl
uintptr_t qemu_real_host_page_size;
114 5b6dd868 Blue Swirl
uintptr_t qemu_host_page_size;
115 5b6dd868 Blue Swirl
uintptr_t qemu_host_page_mask;
116 5b6dd868 Blue Swirl
117 5b6dd868 Blue Swirl
/* This is a multi-level map on the virtual address space.
118 5b6dd868 Blue Swirl
   The bottom level has pointers to PageDesc.  */
119 5b6dd868 Blue Swirl
static void *l1_map[V_L1_SIZE];
120 5b6dd868 Blue Swirl
121 57fec1fe bellard
/* code generation context */
122 57fec1fe bellard
TCGContext tcg_ctx;
123 d19893da bellard
124 5b6dd868 Blue Swirl
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
125 5b6dd868 Blue Swirl
                         tb_page_addr_t phys_page2);
126 a8a826a3 Blue Swirl
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
127 5b6dd868 Blue Swirl
128 57fec1fe bellard
void cpu_gen_init(void)
129 57fec1fe bellard
{
130 57fec1fe bellard
    tcg_context_init(&tcg_ctx); 
131 57fec1fe bellard
}
132 57fec1fe bellard
133 d19893da bellard
/* return non zero if the very first instruction is invalid so that
134 5fafdf24 ths
   the virtual CPU can trigger an exception.
135 d19893da bellard

136 d19893da bellard
   '*gen_code_size_ptr' contains the size of the generated code (host
137 d19893da bellard
   code).
138 d19893da bellard
*/
139 9349b4f9 Andreas Fรคrber
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
140 d19893da bellard
{
141 57fec1fe bellard
    TCGContext *s = &tcg_ctx;
142 d19893da bellard
    uint8_t *gen_code_buf;
143 d19893da bellard
    int gen_code_size;
144 57fec1fe bellard
#ifdef CONFIG_PROFILER
145 57fec1fe bellard
    int64_t ti;
146 57fec1fe bellard
#endif
147 57fec1fe bellard
148 57fec1fe bellard
#ifdef CONFIG_PROFILER
149 b67d9a52 bellard
    s->tb_count1++; /* includes aborted translations because of
150 b67d9a52 bellard
                       exceptions */
151 57fec1fe bellard
    ti = profile_getclock();
152 57fec1fe bellard
#endif
153 57fec1fe bellard
    tcg_func_start(s);
154 d19893da bellard
155 2cfc5f17 ths
    gen_intermediate_code(env, tb);
156 2cfc5f17 ths
157 ec6338ba bellard
    /* generate machine code */
158 57fec1fe bellard
    gen_code_buf = tb->tc_ptr;
159 ec6338ba bellard
    tb->tb_next_offset[0] = 0xffff;
160 ec6338ba bellard
    tb->tb_next_offset[1] = 0xffff;
161 57fec1fe bellard
    s->tb_next_offset = tb->tb_next_offset;
162 4cbb86e1 bellard
#ifdef USE_DIRECT_JUMP
163 57fec1fe bellard
    s->tb_jmp_offset = tb->tb_jmp_offset;
164 57fec1fe bellard
    s->tb_next = NULL;
165 d19893da bellard
#else
166 57fec1fe bellard
    s->tb_jmp_offset = NULL;
167 57fec1fe bellard
    s->tb_next = tb->tb_next;
168 d19893da bellard
#endif
169 57fec1fe bellard
170 57fec1fe bellard
#ifdef CONFIG_PROFILER
171 b67d9a52 bellard
    s->tb_count++;
172 b67d9a52 bellard
    s->interm_time += profile_getclock() - ti;
173 b67d9a52 bellard
    s->code_time -= profile_getclock();
174 57fec1fe bellard
#endif
175 54604f74 aurel32
    gen_code_size = tcg_gen_code(s, gen_code_buf);
176 d19893da bellard
    *gen_code_size_ptr = gen_code_size;
177 57fec1fe bellard
#ifdef CONFIG_PROFILER
178 b67d9a52 bellard
    s->code_time += profile_getclock();
179 b67d9a52 bellard
    s->code_in_len += tb->size;
180 b67d9a52 bellard
    s->code_out_len += gen_code_size;
181 57fec1fe bellard
#endif
182 57fec1fe bellard
183 d19893da bellard
#ifdef DEBUG_DISAS
184 8fec2b8c aliguori
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
185 93fcfe39 aliguori
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
186 93fcfe39 aliguori
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
187 93fcfe39 aliguori
        qemu_log("\n");
188 31b1a7b4 aliguori
        qemu_log_flush();
189 d19893da bellard
    }
190 d19893da bellard
#endif
191 d19893da bellard
    return 0;
192 d19893da bellard
}
193 d19893da bellard
194 5fafdf24 ths
/* The cpu state corresponding to 'searched_pc' is restored.
195 d19893da bellard
 */
196 a8a826a3 Blue Swirl
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
197 a8a826a3 Blue Swirl
                                     uintptr_t searched_pc)
198 d19893da bellard
{
199 57fec1fe bellard
    TCGContext *s = &tcg_ctx;
200 57fec1fe bellard
    int j;
201 6375e09e Stefan Weil
    uintptr_t tc_ptr;
202 57fec1fe bellard
#ifdef CONFIG_PROFILER
203 57fec1fe bellard
    int64_t ti;
204 57fec1fe bellard
#endif
205 57fec1fe bellard
206 57fec1fe bellard
#ifdef CONFIG_PROFILER
207 57fec1fe bellard
    ti = profile_getclock();
208 57fec1fe bellard
#endif
209 57fec1fe bellard
    tcg_func_start(s);
210 d19893da bellard
211 2cfc5f17 ths
    gen_intermediate_code_pc(env, tb);
212 3b46e624 ths
213 2e70f6ef pbrook
    if (use_icount) {
214 2e70f6ef pbrook
        /* Reset the cycle counter to the start of the block.  */
215 2e70f6ef pbrook
        env->icount_decr.u16.low += tb->icount;
216 2e70f6ef pbrook
        /* Clear the IO flag.  */
217 2e70f6ef pbrook
        env->can_do_io = 0;
218 2e70f6ef pbrook
    }
219 2e70f6ef pbrook
220 d19893da bellard
    /* find opc index corresponding to search_pc */
221 6375e09e Stefan Weil
    tc_ptr = (uintptr_t)tb->tc_ptr;
222 d19893da bellard
    if (searched_pc < tc_ptr)
223 d19893da bellard
        return -1;
224 57fec1fe bellard
225 57fec1fe bellard
    s->tb_next_offset = tb->tb_next_offset;
226 57fec1fe bellard
#ifdef USE_DIRECT_JUMP
227 57fec1fe bellard
    s->tb_jmp_offset = tb->tb_jmp_offset;
228 57fec1fe bellard
    s->tb_next = NULL;
229 57fec1fe bellard
#else
230 57fec1fe bellard
    s->tb_jmp_offset = NULL;
231 57fec1fe bellard
    s->tb_next = tb->tb_next;
232 57fec1fe bellard
#endif
233 54604f74 aurel32
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
234 57fec1fe bellard
    if (j < 0)
235 57fec1fe bellard
        return -1;
236 d19893da bellard
    /* now find start of instruction before */
237 ab1103de Evgeny Voevodin
    while (s->gen_opc_instr_start[j] == 0) {
238 d19893da bellard
        j--;
239 ab1103de Evgeny Voevodin
    }
240 c9c99c22 Evgeny Voevodin
    env->icount_decr.u16.low -= s->gen_opc_icount[j];
241 3b46e624 ths
242 e87b7cb0 Stefan Weil
    restore_state_to_opc(env, tb, j);
243 57fec1fe bellard
244 57fec1fe bellard
#ifdef CONFIG_PROFILER
245 b67d9a52 bellard
    s->restore_time += profile_getclock() - ti;
246 b67d9a52 bellard
    s->restore_count++;
247 57fec1fe bellard
#endif
248 d19893da bellard
    return 0;
249 d19893da bellard
}
250 5b6dd868 Blue Swirl
251 a8a826a3 Blue Swirl
/* Try to restore the guest CPU state for host return address 'retaddr'.
 * Returns true when a TB containing that address exists and the state
 * was restored, false otherwise. */
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb = tb_find_pc(retaddr);

    if (!tb) {
        return false;
    }
    cpu_restore_state_from_tb(tb, env, retaddr);
    return true;
}
262 a8a826a3 Blue Swirl
263 5b6dd868 Blue Swirl
#ifdef _WIN32
/* Make the given address range writable and executable. */
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make the given address range writable and executable, widening it to
 * whole host pages as mprotect() requires. */
static inline void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long start = (unsigned long)addr & ~(page_size - 1);
    unsigned long end = ((unsigned long)addr + size + page_size - 1)
                        & ~(page_size - 1);

    /* NOTE(review): the mprotect() return value is deliberately
       ignored, matching the original behaviour.  */
    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
287 5b6dd868 Blue Swirl
288 5b6dd868 Blue Swirl
static void page_init(void)
289 5b6dd868 Blue Swirl
{
290 5b6dd868 Blue Swirl
    /* NOTE: we can always suppose that qemu_host_page_size >=
291 5b6dd868 Blue Swirl
       TARGET_PAGE_SIZE */
292 5b6dd868 Blue Swirl
#ifdef _WIN32
293 5b6dd868 Blue Swirl
    {
294 5b6dd868 Blue Swirl
        SYSTEM_INFO system_info;
295 5b6dd868 Blue Swirl
296 5b6dd868 Blue Swirl
        GetSystemInfo(&system_info);
297 5b6dd868 Blue Swirl
        qemu_real_host_page_size = system_info.dwPageSize;
298 5b6dd868 Blue Swirl
    }
299 5b6dd868 Blue Swirl
#else
300 5b6dd868 Blue Swirl
    qemu_real_host_page_size = getpagesize();
301 5b6dd868 Blue Swirl
#endif
302 5b6dd868 Blue Swirl
    if (qemu_host_page_size == 0) {
303 5b6dd868 Blue Swirl
        qemu_host_page_size = qemu_real_host_page_size;
304 5b6dd868 Blue Swirl
    }
305 5b6dd868 Blue Swirl
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
306 5b6dd868 Blue Swirl
        qemu_host_page_size = TARGET_PAGE_SIZE;
307 5b6dd868 Blue Swirl
    }
308 5b6dd868 Blue Swirl
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
309 5b6dd868 Blue Swirl
310 5b6dd868 Blue Swirl
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
311 5b6dd868 Blue Swirl
    {
312 5b6dd868 Blue Swirl
#ifdef HAVE_KINFO_GETVMMAP
313 5b6dd868 Blue Swirl
        struct kinfo_vmentry *freep;
314 5b6dd868 Blue Swirl
        int i, cnt;
315 5b6dd868 Blue Swirl
316 5b6dd868 Blue Swirl
        freep = kinfo_getvmmap(getpid(), &cnt);
317 5b6dd868 Blue Swirl
        if (freep) {
318 5b6dd868 Blue Swirl
            mmap_lock();
319 5b6dd868 Blue Swirl
            for (i = 0; i < cnt; i++) {
320 5b6dd868 Blue Swirl
                unsigned long startaddr, endaddr;
321 5b6dd868 Blue Swirl
322 5b6dd868 Blue Swirl
                startaddr = freep[i].kve_start;
323 5b6dd868 Blue Swirl
                endaddr = freep[i].kve_end;
324 5b6dd868 Blue Swirl
                if (h2g_valid(startaddr)) {
325 5b6dd868 Blue Swirl
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
326 5b6dd868 Blue Swirl
327 5b6dd868 Blue Swirl
                    if (h2g_valid(endaddr)) {
328 5b6dd868 Blue Swirl
                        endaddr = h2g(endaddr);
329 5b6dd868 Blue Swirl
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
330 5b6dd868 Blue Swirl
                    } else {
331 5b6dd868 Blue Swirl
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
332 5b6dd868 Blue Swirl
                        endaddr = ~0ul;
333 5b6dd868 Blue Swirl
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
334 5b6dd868 Blue Swirl
#endif
335 5b6dd868 Blue Swirl
                    }
336 5b6dd868 Blue Swirl
                }
337 5b6dd868 Blue Swirl
            }
338 5b6dd868 Blue Swirl
            free(freep);
339 5b6dd868 Blue Swirl
            mmap_unlock();
340 5b6dd868 Blue Swirl
        }
341 5b6dd868 Blue Swirl
#else
342 5b6dd868 Blue Swirl
        FILE *f;
343 5b6dd868 Blue Swirl
344 5b6dd868 Blue Swirl
        last_brk = (unsigned long)sbrk(0);
345 5b6dd868 Blue Swirl
346 5b6dd868 Blue Swirl
        f = fopen("/compat/linux/proc/self/maps", "r");
347 5b6dd868 Blue Swirl
        if (f) {
348 5b6dd868 Blue Swirl
            mmap_lock();
349 5b6dd868 Blue Swirl
350 5b6dd868 Blue Swirl
            do {
351 5b6dd868 Blue Swirl
                unsigned long startaddr, endaddr;
352 5b6dd868 Blue Swirl
                int n;
353 5b6dd868 Blue Swirl
354 5b6dd868 Blue Swirl
                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
355 5b6dd868 Blue Swirl
356 5b6dd868 Blue Swirl
                if (n == 2 && h2g_valid(startaddr)) {
357 5b6dd868 Blue Swirl
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
358 5b6dd868 Blue Swirl
359 5b6dd868 Blue Swirl
                    if (h2g_valid(endaddr)) {
360 5b6dd868 Blue Swirl
                        endaddr = h2g(endaddr);
361 5b6dd868 Blue Swirl
                    } else {
362 5b6dd868 Blue Swirl
                        endaddr = ~0ul;
363 5b6dd868 Blue Swirl
                    }
364 5b6dd868 Blue Swirl
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
365 5b6dd868 Blue Swirl
                }
366 5b6dd868 Blue Swirl
            } while (!feof(f));
367 5b6dd868 Blue Swirl
368 5b6dd868 Blue Swirl
            fclose(f);
369 5b6dd868 Blue Swirl
            mmap_unlock();
370 5b6dd868 Blue Swirl
        }
371 5b6dd868 Blue Swirl
#endif
372 5b6dd868 Blue Swirl
    }
373 5b6dd868 Blue Swirl
#endif
374 5b6dd868 Blue Swirl
}
375 5b6dd868 Blue Swirl
376 5b6dd868 Blue Swirl
/* Walk the multi-level l1_map for 'index' and return a pointer to its
 * PageDesc.  When 'alloc' is non-zero, missing intermediate tables and
 * the final PageDesc page are allocated on demand; otherwise NULL is
 * returned as soon as a missing level is encountered. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int level;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Levels 2..N-1: intermediate pointer tables.  */
    for (level = V_L1_SHIFT / L2_BITS - 1; level > 0; level--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (level * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Final level: a page of PageDesc entries.  */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
425 5b6dd868 Blue Swirl
426 5b6dd868 Blue Swirl
/* Look up the PageDesc for 'index' without allocating missing levels. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
430 5b6dd868 Blue Swirl
431 5b6dd868 Blue Swirl
#if !defined(CONFIG_USER_ONLY)
432 5b6dd868 Blue Swirl
#define mmap_lock() do { } while (0)
433 5b6dd868 Blue Swirl
#define mmap_unlock() do { } while (0)
434 5b6dd868 Blue Swirl
#endif
435 5b6dd868 Blue Swirl
436 5b6dd868 Blue Swirl
#if defined(CONFIG_USER_ONLY)
437 5b6dd868 Blue Swirl
/* Currently it is not recommended to allocate big chunks of data in
438 5b6dd868 Blue Swirl
   user mode. It will change when a dedicated libc will be used.  */
439 5b6dd868 Blue Swirl
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
440 5b6dd868 Blue Swirl
   region in which the guest needs to run.  Revisit this.  */
441 5b6dd868 Blue Swirl
#define USE_STATIC_CODE_GEN_BUFFER
442 5b6dd868 Blue Swirl
#endif
443 5b6dd868 Blue Swirl
444 5b6dd868 Blue Swirl
/* ??? Should configure for this, not list operating systems here.  */
445 5b6dd868 Blue Swirl
#if (defined(__linux__) \
446 5b6dd868 Blue Swirl
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
447 5b6dd868 Blue Swirl
    || defined(__DragonFly__) || defined(__OpenBSD__) \
448 5b6dd868 Blue Swirl
    || defined(__NetBSD__))
449 5b6dd868 Blue Swirl
# define USE_MMAP
450 5b6dd868 Blue Swirl
#endif
451 5b6dd868 Blue Swirl
452 5b6dd868 Blue Swirl
/* Minimum size of the code gen buffer.  This number is randomly chosen,
453 5b6dd868 Blue Swirl
   but not so small that we can't have a fair number of TB's live.  */
454 5b6dd868 Blue Swirl
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
455 5b6dd868 Blue Swirl
456 5b6dd868 Blue Swirl
/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
457 5b6dd868 Blue Swirl
   indicated, this is constrained by the range of direct branches on the
458 5b6dd868 Blue Swirl
   host cpu, as used by the TCG implementation of goto_tb.  */
459 5b6dd868 Blue Swirl
#if defined(__x86_64__)
460 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
461 5b6dd868 Blue Swirl
#elif defined(__sparc__)
462 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
463 4a136e0a Claudio Fontana
#elif defined(__aarch64__)
464 4a136e0a Claudio Fontana
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
465 5b6dd868 Blue Swirl
#elif defined(__arm__)
466 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
467 5b6dd868 Blue Swirl
#elif defined(__s390x__)
468 5b6dd868 Blue Swirl
  /* We have a +- 4GB range on the branches; leave some slop.  */
469 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
470 5b6dd868 Blue Swirl
#else
471 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
472 5b6dd868 Blue Swirl
#endif
473 5b6dd868 Blue Swirl
474 5b6dd868 Blue Swirl
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
475 5b6dd868 Blue Swirl
476 5b6dd868 Blue Swirl
#define DEFAULT_CODE_GEN_BUFFER_SIZE \
477 5b6dd868 Blue Swirl
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
478 5b6dd868 Blue Swirl
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
479 5b6dd868 Blue Swirl
480 5b6dd868 Blue Swirl
static inline size_t size_code_gen_buffer(size_t tb_size)
481 5b6dd868 Blue Swirl
{
482 5b6dd868 Blue Swirl
    /* Size the buffer.  */
483 5b6dd868 Blue Swirl
    if (tb_size == 0) {
484 5b6dd868 Blue Swirl
#ifdef USE_STATIC_CODE_GEN_BUFFER
485 5b6dd868 Blue Swirl
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
486 5b6dd868 Blue Swirl
#else
487 5b6dd868 Blue Swirl
        /* ??? Needs adjustments.  */
488 5b6dd868 Blue Swirl
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
489 5b6dd868 Blue Swirl
           static buffer, we could size this on RESERVED_VA, on the text
490 5b6dd868 Blue Swirl
           segment size of the executable, or continue to use the default.  */
491 5b6dd868 Blue Swirl
        tb_size = (unsigned long)(ram_size / 4);
492 5b6dd868 Blue Swirl
#endif
493 5b6dd868 Blue Swirl
    }
494 5b6dd868 Blue Swirl
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
495 5b6dd868 Blue Swirl
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
496 5b6dd868 Blue Swirl
    }
497 5b6dd868 Blue Swirl
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
498 5b6dd868 Blue Swirl
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
499 5b6dd868 Blue Swirl
    }
500 0b0d3320 Evgeny Voevodin
    tcg_ctx.code_gen_buffer_size = tb_size;
501 5b6dd868 Blue Swirl
    return tb_size;
502 5b6dd868 Blue Swirl
}
503 5b6dd868 Blue Swirl
504 5b6dd868 Blue Swirl
#ifdef USE_STATIC_CODE_GEN_BUFFER
505 5b6dd868 Blue Swirl
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
506 5b6dd868 Blue Swirl
    __attribute__((aligned(CODE_GEN_ALIGN)));
507 5b6dd868 Blue Swirl
508 5b6dd868 Blue Swirl
static inline void *alloc_code_gen_buffer(void)
509 5b6dd868 Blue Swirl
{
510 0b0d3320 Evgeny Voevodin
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
511 5b6dd868 Blue Swirl
    return static_code_gen_buffer;
512 5b6dd868 Blue Swirl
}
513 5b6dd868 Blue Swirl
#elif defined(USE_MMAP)
514 5b6dd868 Blue Swirl
static inline void *alloc_code_gen_buffer(void)
515 5b6dd868 Blue Swirl
{
516 5b6dd868 Blue Swirl
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
517 5b6dd868 Blue Swirl
    uintptr_t start = 0;
518 5b6dd868 Blue Swirl
    void *buf;
519 5b6dd868 Blue Swirl
520 5b6dd868 Blue Swirl
    /* Constrain the position of the buffer based on the host cpu.
521 5b6dd868 Blue Swirl
       Note that these addresses are chosen in concert with the
522 5b6dd868 Blue Swirl
       addresses assigned in the relevant linker script file.  */
523 5b6dd868 Blue Swirl
# if defined(__PIE__) || defined(__PIC__)
524 5b6dd868 Blue Swirl
    /* Don't bother setting a preferred location if we're building
525 5b6dd868 Blue Swirl
       a position-independent executable.  We're more likely to get
526 5b6dd868 Blue Swirl
       an address near the main executable if we let the kernel
527 5b6dd868 Blue Swirl
       choose the address.  */
528 5b6dd868 Blue Swirl
# elif defined(__x86_64__) && defined(MAP_32BIT)
529 5b6dd868 Blue Swirl
    /* Force the memory down into low memory with the executable.
530 5b6dd868 Blue Swirl
       Leave the choice of exact location with the kernel.  */
531 5b6dd868 Blue Swirl
    flags |= MAP_32BIT;
532 5b6dd868 Blue Swirl
    /* Cannot expect to map more than 800MB in low memory.  */
533 0b0d3320 Evgeny Voevodin
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
534 0b0d3320 Evgeny Voevodin
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
535 5b6dd868 Blue Swirl
    }
536 5b6dd868 Blue Swirl
# elif defined(__sparc__)
537 5b6dd868 Blue Swirl
    start = 0x40000000ul;
538 5b6dd868 Blue Swirl
# elif defined(__s390x__)
539 5b6dd868 Blue Swirl
    start = 0x90000000ul;
540 5b6dd868 Blue Swirl
# endif
541 5b6dd868 Blue Swirl
542 0b0d3320 Evgeny Voevodin
    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
543 5b6dd868 Blue Swirl
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
544 5b6dd868 Blue Swirl
    return buf == MAP_FAILED ? NULL : buf;
545 5b6dd868 Blue Swirl
}
546 5b6dd868 Blue Swirl
#else
547 5b6dd868 Blue Swirl
static inline void *alloc_code_gen_buffer(void)
548 5b6dd868 Blue Swirl
{
549 0b0d3320 Evgeny Voevodin
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);
550 5b6dd868 Blue Swirl
551 5b6dd868 Blue Swirl
    if (buf) {
552 0b0d3320 Evgeny Voevodin
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
553 5b6dd868 Blue Swirl
    }
554 5b6dd868 Blue Swirl
    return buf;
555 5b6dd868 Blue Swirl
}
556 5b6dd868 Blue Swirl
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
557 5b6dd868 Blue Swirl
558 5b6dd868 Blue Swirl
static inline void code_gen_alloc(size_t tb_size)
559 5b6dd868 Blue Swirl
{
560 0b0d3320 Evgeny Voevodin
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
561 0b0d3320 Evgeny Voevodin
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
562 0b0d3320 Evgeny Voevodin
    if (tcg_ctx.code_gen_buffer == NULL) {
563 5b6dd868 Blue Swirl
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
564 5b6dd868 Blue Swirl
        exit(1);
565 5b6dd868 Blue Swirl
    }
566 5b6dd868 Blue Swirl
567 0b0d3320 Evgeny Voevodin
    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
568 0b0d3320 Evgeny Voevodin
            QEMU_MADV_HUGEPAGE);
569 5b6dd868 Blue Swirl
570 5b6dd868 Blue Swirl
    /* Steal room for the prologue at the end of the buffer.  This ensures
571 5b6dd868 Blue Swirl
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
572 5b6dd868 Blue Swirl
       from TB's to the prologue are going to be in range.  It also means
573 5b6dd868 Blue Swirl
       that we don't need to mark (additional) portions of the data segment
574 5b6dd868 Blue Swirl
       as executable.  */
575 0b0d3320 Evgeny Voevodin
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
576 0b0d3320 Evgeny Voevodin
            tcg_ctx.code_gen_buffer_size - 1024;
577 0b0d3320 Evgeny Voevodin
    tcg_ctx.code_gen_buffer_size -= 1024;
578 5b6dd868 Blue Swirl
579 0b0d3320 Evgeny Voevodin
    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
580 5b6dd868 Blue Swirl
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
581 0b0d3320 Evgeny Voevodin
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
582 0b0d3320 Evgeny Voevodin
            CODE_GEN_AVG_BLOCK_SIZE;
583 5e5f07e0 Evgeny Voevodin
    tcg_ctx.tb_ctx.tbs =
584 5e5f07e0 Evgeny Voevodin
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
585 5b6dd868 Blue Swirl
}
586 5b6dd868 Blue Swirl
587 5b6dd868 Blue Swirl
/* Must be called before using the QEMU cpus. 'tb_size' is the size
588 5b6dd868 Blue Swirl
   (in bytes) allocated to the translation buffer. Zero means default
589 5b6dd868 Blue Swirl
   size. */
590 5b6dd868 Blue Swirl
void tcg_exec_init(unsigned long tb_size)
591 5b6dd868 Blue Swirl
{
592 5b6dd868 Blue Swirl
    cpu_gen_init();
593 5b6dd868 Blue Swirl
    code_gen_alloc(tb_size);
594 0b0d3320 Evgeny Voevodin
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
595 0b0d3320 Evgeny Voevodin
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
596 5b6dd868 Blue Swirl
    page_init();
597 5b6dd868 Blue Swirl
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
598 5b6dd868 Blue Swirl
    /* There's no guest base to take into account, so go ahead and
599 5b6dd868 Blue Swirl
       initialize the prologue now.  */
600 5b6dd868 Blue Swirl
    tcg_prologue_init(&tcg_ctx);
601 5b6dd868 Blue Swirl
#endif
602 5b6dd868 Blue Swirl
}
603 5b6dd868 Blue Swirl
604 5b6dd868 Blue Swirl
bool tcg_enabled(void)
605 5b6dd868 Blue Swirl
{
606 0b0d3320 Evgeny Voevodin
    return tcg_ctx.code_gen_buffer != NULL;
607 5b6dd868 Blue Swirl
}
608 5b6dd868 Blue Swirl
609 5b6dd868 Blue Swirl
/* Allocate a new translation block. Flush the translation buffer if
610 5b6dd868 Blue Swirl
   too many translation blocks or too much generated code. */
611 5b6dd868 Blue Swirl
static TranslationBlock *tb_alloc(target_ulong pc)
612 5b6dd868 Blue Swirl
{
613 5b6dd868 Blue Swirl
    TranslationBlock *tb;
614 5b6dd868 Blue Swirl
615 5e5f07e0 Evgeny Voevodin
    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
616 0b0d3320 Evgeny Voevodin
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
617 0b0d3320 Evgeny Voevodin
         tcg_ctx.code_gen_buffer_max_size) {
618 5b6dd868 Blue Swirl
        return NULL;
619 5b6dd868 Blue Swirl
    }
620 5e5f07e0 Evgeny Voevodin
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
621 5b6dd868 Blue Swirl
    tb->pc = pc;
622 5b6dd868 Blue Swirl
    tb->cflags = 0;
623 5b6dd868 Blue Swirl
    return tb;
624 5b6dd868 Blue Swirl
}
625 5b6dd868 Blue Swirl
626 5b6dd868 Blue Swirl
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    TranslationBlock *last;

    if (tcg_ctx.tb_ctx.nb_tbs == 0) {
        return;
    }
    last = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1];
    if (tb != last) {
        return;
    }
    /* Reclaim both the generated code and the descriptor slot.  */
    tcg_ctx.code_gen_ptr = tb->tc_ptr;
    tcg_ctx.tb_ctx.nb_tbs--;
}
637 5b6dd868 Blue Swirl
638 5b6dd868 Blue Swirl
/* Discard the SMC code bitmap of a page (if any) and reset its write
   counter.  g_free() is documented to accept NULL, so the previous
   "if (p->code_bitmap)" guard was redundant and has been dropped.  */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
}
646 5b6dd868 Blue Swirl
647 5b6dd868 Blue Swirl
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
648 5b6dd868 Blue Swirl
static void page_flush_tb_1(int level, void **lp)
649 5b6dd868 Blue Swirl
{
650 5b6dd868 Blue Swirl
    int i;
651 5b6dd868 Blue Swirl
652 5b6dd868 Blue Swirl
    if (*lp == NULL) {
653 5b6dd868 Blue Swirl
        return;
654 5b6dd868 Blue Swirl
    }
655 5b6dd868 Blue Swirl
    if (level == 0) {
656 5b6dd868 Blue Swirl
        PageDesc *pd = *lp;
657 5b6dd868 Blue Swirl
658 5b6dd868 Blue Swirl
        for (i = 0; i < L2_SIZE; ++i) {
659 5b6dd868 Blue Swirl
            pd[i].first_tb = NULL;
660 5b6dd868 Blue Swirl
            invalidate_page_bitmap(pd + i);
661 5b6dd868 Blue Swirl
        }
662 5b6dd868 Blue Swirl
    } else {
663 5b6dd868 Blue Swirl
        void **pp = *lp;
664 5b6dd868 Blue Swirl
665 5b6dd868 Blue Swirl
        for (i = 0; i < L2_SIZE; ++i) {
666 5b6dd868 Blue Swirl
            page_flush_tb_1(level - 1, pp + i);
667 5b6dd868 Blue Swirl
        }
668 5b6dd868 Blue Swirl
    }
669 5b6dd868 Blue Swirl
}
670 5b6dd868 Blue Swirl
671 5b6dd868 Blue Swirl
static void page_flush_tb(void)
672 5b6dd868 Blue Swirl
{
673 5b6dd868 Blue Swirl
    int i;
674 5b6dd868 Blue Swirl
675 5b6dd868 Blue Swirl
    for (i = 0; i < V_L1_SIZE; i++) {
676 5b6dd868 Blue Swirl
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
677 5b6dd868 Blue Swirl
    }
678 5b6dd868 Blue Swirl
}
679 5b6dd868 Blue Swirl
680 5b6dd868 Blue Swirl
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    /* Sanity check: the generation pointer must still lie inside the
       code buffer, otherwise it has already overflowed.  */
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    /* Clear every CPU's TB lookup cache so no stale TB is executed.  */
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        CPUArchState *env = cpu->env_ptr;

        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    /* Empty the physical-PC hash table and detach all TBs from their
       pages.  */
    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
            CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    /* Restart code generation from the beginning of the buffer.  */
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
714 5b6dd868 Blue Swirl
715 5b6dd868 Blue Swirl
#ifdef DEBUG_TB_CHECK
716 5b6dd868 Blue Swirl
717 5b6dd868 Blue Swirl
/* Debug check: report any TB that still overlaps the invalidated page
   containing 'address'.  */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        /* Fix: the hash table lives in tcg_ctx.tb_ctx; the bare
           "tb_ctx" name does not exist (every other user in this file
           says tcg_ctx.tb_ctx.tb_phys_hash), so DEBUG_TB_CHECK builds
           failed to compile.  */
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
734 5b6dd868 Blue Swirl
735 5b6dd868 Blue Swirl
/* verify that all the pages have correct rights for code */
736 5b6dd868 Blue Swirl
static void tb_page_check(void)
737 5b6dd868 Blue Swirl
{
738 5b6dd868 Blue Swirl
    TranslationBlock *tb;
739 5b6dd868 Blue Swirl
    int i, flags1, flags2;
740 5b6dd868 Blue Swirl
741 5b6dd868 Blue Swirl
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
742 5e5f07e0 Evgeny Voevodin
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
743 5e5f07e0 Evgeny Voevodin
                tb = tb->phys_hash_next) {
744 5b6dd868 Blue Swirl
            flags1 = page_get_flags(tb->pc);
745 5b6dd868 Blue Swirl
            flags2 = page_get_flags(tb->pc + tb->size - 1);
746 5b6dd868 Blue Swirl
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
747 5b6dd868 Blue Swirl
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
748 5b6dd868 Blue Swirl
                       (long)tb->pc, tb->size, flags1, flags2);
749 5b6dd868 Blue Swirl
            }
750 5b6dd868 Blue Swirl
        }
751 5b6dd868 Blue Swirl
    }
752 5b6dd868 Blue Swirl
}
753 5b6dd868 Blue Swirl
754 5b6dd868 Blue Swirl
#endif
755 5b6dd868 Blue Swirl
756 0c884d16 ้™ณ้Ÿ‹ไปป (Wei-Ren Chen)
/* Unlink 'tb' from the singly-linked phys-hash chain rooted at *ptb.
   Like the original, assumes 'tb' is present in the chain.  */
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    while (*ptb != tb) {
        ptb = &(*ptb)->phys_hash_next;
    }
    *ptb = tb->phys_hash_next;
}
769 5b6dd868 Blue Swirl
770 5b6dd868 Blue Swirl
/* Unlink 'tb' from a per-page TB list.  The low two bits of each link
   encode which page slot (0/1) of the pointed-to TB the list continues
   through.  */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    for (;;) {
        TranslationBlock *entry = *ptb;
        unsigned int slot = (uintptr_t)entry & 3;
        TranslationBlock *stripped =
            (TranslationBlock *)((uintptr_t)entry & ~3);

        if (stripped == tb) {
            *ptb = stripped->page_next[slot];
            return;
        }
        ptb = &stripped->page_next[slot];
    }
}
786 5b6dd868 Blue Swirl
787 5b6dd868 Blue Swirl
/* Unlink this TB from the circular list of TBs jumping into the TB
   that tb->jmp_next[n] leads to.  Links are tagged in their low two
   bits: 0/1 select which jmp_next slot the chain continues through,
   2 marks the list head (jmp_first).  */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                /* tag 2: continue the walk through the list head */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
815 5b6dd868 Blue Swirl
816 5b6dd868 Blue Swirl
/* reset the jump entry 'n' of a TB so that it is not chained to
817 5b6dd868 Blue Swirl
   another TB */
818 5b6dd868 Blue Swirl
static inline void tb_reset_jump(TranslationBlock *tb, int n)
819 5b6dd868 Blue Swirl
{
820 5b6dd868 Blue Swirl
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
821 5b6dd868 Blue Swirl
}
822 5b6dd868 Blue Swirl
823 0c884d16 ้™ณ้Ÿ‹ไปป (Wei-Ren Chen)
/* invalidate one TB: unlink it from the phys-PC hash, its page lists,
   every CPU's jump cache, and both incoming/outgoing jump chains.  */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list (a TB may span two pages;
       the page equal to 'page_addr' is being handled by the caller) */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        CPUArchState *env = cpu->env_ptr;

        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the tagged
       circular list rooted at jmp_first (tag 2 = back at the head)
       and unchain each jumping TB.  */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
882 5b6dd868 Blue Swirl
883 5b6dd868 Blue Swirl
/* Set 'len' consecutive bits starting at bit index 'start' in the
   little-endian-within-byte bitmap 'tab' (bit i lives in byte i/8,
   position i%8).  Straightforward bit-at-a-time form, behaviorally
   identical to the byte-blasting original.  */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    int end = start + len;

    for (bit = start; bit < end; bit++) {
        tab[bit >> 3] |= (uint8_t)(1u << (bit & 7));
    }
}
909 5b6dd868 Blue Swirl
910 5b6dd868 Blue Swirl
/* Build the SMC code bitmap of a page: one bit per byte of the page,
   set for every byte covered by translated code.  Used to filter
   writes in tb_invalidate_phys_page_fast().  */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* One bit per byte of the target page, zero-initialized.  */
    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* The low two bits of the link encode which page slot (0/1)
           of the TB this page is.  */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* Second page of a spanning TB: its code starts at
               offset 0 on this page.  */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
938 5b6dd868 Blue Swirl
939 5b6dd868 Blue Swirl
/* Translate the guest code at pc/cs_base/flags into a new
   TranslationBlock.  If the TB pool or code buffer is exhausted, all
   TBs are flushed and allocation is retried, so this cannot fail.  */
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    /* Emit host code at the current generation pointer.  */
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* Advance the generation pointer past the emitted code, rounded
       up to CODE_GEN_ALIGN.  */
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* The TB spans two pages: record the second physical page.  */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
977 5b6dd868 Blue Swirl
978 5b6dd868 Blue Swirl
/*
979 5b6dd868 Blue Swirl
 * Invalidate all TBs which intersect with the target physical address range
980 5b6dd868 Blue Swirl
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
981 5b6dd868 Blue Swirl
 * 'is_cpu_write_access' should be true if called from a real cpu write
982 5b6dd868 Blue Swirl
 * access: the virtual CPU will exit the current TB if code is modified inside
983 5b6dd868 Blue Swirl
 * this TB.
984 5b6dd868 Blue Swirl
 */
985 5b6dd868 Blue Swirl
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
986 5b6dd868 Blue Swirl
                              int is_cpu_write_access)
987 5b6dd868 Blue Swirl
{
988 5b6dd868 Blue Swirl
    while (start < end) {
989 5b6dd868 Blue Swirl
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
990 5b6dd868 Blue Swirl
        start &= TARGET_PAGE_MASK;
991 5b6dd868 Blue Swirl
        start += TARGET_PAGE_SIZE;
992 5b6dd868 Blue Swirl
    }
993 5b6dd868 Blue Swirl
}
994 5b6dd868 Blue Swirl
995 5b6dd868 Blue Swirl
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* On a real CPU write the faulting TB is looked up lazily below.  */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    /* After enough CPU writes to the page, build a per-byte bitmap so
       later small writes can be filtered cheaply.  */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* Low two bits of the link = which page slot (0/1) this is.  */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1117 5b6dd868 Blue Swirl
1118 5b6dd868 Blue Swirl
/* len must be <= 8 and start must be a multiple of len */
1119 5b6dd868 Blue Swirl
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1120 5b6dd868 Blue Swirl
{
1121 5b6dd868 Blue Swirl
    PageDesc *p;
1122 5b6dd868 Blue Swirl
    int offset, b;
1123 5b6dd868 Blue Swirl
1124 5b6dd868 Blue Swirl
#if 0
1125 5b6dd868 Blue Swirl
    if (1) {
1126 5b6dd868 Blue Swirl
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1127 5b6dd868 Blue Swirl
                  cpu_single_env->mem_io_vaddr, len,
1128 5b6dd868 Blue Swirl
                  cpu_single_env->eip,
1129 5b6dd868 Blue Swirl
                  cpu_single_env->eip +
1130 5b6dd868 Blue Swirl
                  (intptr_t)cpu_single_env->segs[R_CS].base);
1131 5b6dd868 Blue Swirl
    }
1132 5b6dd868 Blue Swirl
#endif
1133 5b6dd868 Blue Swirl
    p = page_find(start >> TARGET_PAGE_BITS);
1134 5b6dd868 Blue Swirl
    if (!p) {
1135 5b6dd868 Blue Swirl
        return;
1136 5b6dd868 Blue Swirl
    }
1137 5b6dd868 Blue Swirl
    if (p->code_bitmap) {
1138 5b6dd868 Blue Swirl
        offset = start & ~TARGET_PAGE_MASK;
1139 5b6dd868 Blue Swirl
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1140 5b6dd868 Blue Swirl
        if (b & ((1 << len) - 1)) {
1141 5b6dd868 Blue Swirl
            goto do_invalidate;
1142 5b6dd868 Blue Swirl
        }
1143 5b6dd868 Blue Swirl
    } else {
1144 5b6dd868 Blue Swirl
    do_invalidate:
1145 5b6dd868 Blue Swirl
        tb_invalidate_phys_page_range(start, start + len, 1);
1146 5b6dd868 Blue Swirl
    }
1147 5b6dd868 Blue Swirl
}
1148 5b6dd868 Blue Swirl
1149 5b6dd868 Blue Swirl
#if !defined(CONFIG_SOFTMMU)
1150 5b6dd868 Blue Swirl
/* Invalidate every TB on the page containing 'addr' (user-mode
   self-modifying-code path).  'pc' is the host PC of the write, used
   with TARGET_HAS_PRECISE_SMC to detect modification of the currently
   executing TB; 'puc'/'locked' are forwarded to the signal-resume
   machinery.  */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* Find the TB whose generated code contains the faulting pc.  */
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        /* Low two bits of the link = which page slot (0/1) this is.  */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(env, puc);
    }
#endif
}
1217 5b6dd868 Blue Swirl
#endif
1218 5b6dd868 Blue Swirl
1219 5b6dd868 Blue Swirl
/* Add the tb in the target page and protect it if necessary.
 *
 * 'n' selects which of the TB's (at most two) pages this call covers:
 * 0 for the first page, 1 for the second page of a TB that crosses a
 * page boundary.  'page_addr' is the physical address of that page.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* Insert the TB at the head of the page's TB list; 'n' is encoded
       in the low bits of the pointer so a walker can tell which of the
       TB's pages linked it here.  */
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    /* Remember whether the page already held translated code before we
       link ourselves in (checked below to avoid re-protecting).  */
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* A host page may span several target pages; accumulate the
           flags of all of them and strip PAGE_WRITE from each.  */
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1277 5b6dd868 Blue Swirl
1278 5b6dd868 Blue Swirl
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    /* head insertion into the bucket's singly-linked list */
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* jmp_first points back at this TB with 2 encoded in the low
       pointer bits; no other TB jumps to this one yet.  */
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1320 5b6dd868 Blue Swirl
1321 5b6dd868 Blue Swirl
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_size
       is used instead of code_gen_ptr for upper boundary checking */
    uintptr_t buf_start = (uintptr_t)tcg_ctx.code_gen_buffer;
    uintptr_t buf_end = buf_start + tcg_ctx.code_gen_buffer_size;

    return tc_ptr >= buf_start && tc_ptr < buf_end;
}
#endif
1332 5b6dd868 Blue Swirl
1333 5b6dd868 Blue Swirl
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1334 5b6dd868 Blue Swirl
   tb[1].tc_ptr. Return NULL if not found */
1335 a8a826a3 Blue Swirl
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1336 5b6dd868 Blue Swirl
{
1337 5b6dd868 Blue Swirl
    int m_min, m_max, m;
1338 5b6dd868 Blue Swirl
    uintptr_t v;
1339 5b6dd868 Blue Swirl
    TranslationBlock *tb;
1340 5b6dd868 Blue Swirl
1341 5e5f07e0 Evgeny Voevodin
    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1342 5b6dd868 Blue Swirl
        return NULL;
1343 5b6dd868 Blue Swirl
    }
1344 0b0d3320 Evgeny Voevodin
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1345 0b0d3320 Evgeny Voevodin
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1346 5b6dd868 Blue Swirl
        return NULL;
1347 5b6dd868 Blue Swirl
    }
1348 5b6dd868 Blue Swirl
    /* binary search (cf Knuth) */
1349 5b6dd868 Blue Swirl
    m_min = 0;
1350 5e5f07e0 Evgeny Voevodin
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1351 5b6dd868 Blue Swirl
    while (m_min <= m_max) {
1352 5b6dd868 Blue Swirl
        m = (m_min + m_max) >> 1;
1353 5e5f07e0 Evgeny Voevodin
        tb = &tcg_ctx.tb_ctx.tbs[m];
1354 5b6dd868 Blue Swirl
        v = (uintptr_t)tb->tc_ptr;
1355 5b6dd868 Blue Swirl
        if (v == tc_ptr) {
1356 5b6dd868 Blue Swirl
            return tb;
1357 5b6dd868 Blue Swirl
        } else if (tc_ptr < v) {
1358 5b6dd868 Blue Swirl
            m_max = m - 1;
1359 5b6dd868 Blue Swirl
        } else {
1360 5b6dd868 Blue Swirl
            m_min = m + 1;
1361 5b6dd868 Blue Swirl
        }
1362 5b6dd868 Blue Swirl
    }
1363 5e5f07e0 Evgeny Voevodin
    return &tcg_ctx.tb_ctx.tbs[m_max];
1364 5b6dd868 Blue Swirl
}
1365 5b6dd868 Blue Swirl
1366 5b6dd868 Blue Swirl
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
1367 5b6dd868 Blue Swirl
void tb_invalidate_phys_addr(hwaddr addr)
1368 5b6dd868 Blue Swirl
{
1369 5b6dd868 Blue Swirl
    ram_addr_t ram_addr;
1370 5c8a00ce Paolo Bonzini
    MemoryRegion *mr;
1371 149f54b5 Paolo Bonzini
    hwaddr l = 1;
1372 5b6dd868 Blue Swirl
1373 5c8a00ce Paolo Bonzini
    mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
1374 5c8a00ce Paolo Bonzini
    if (!(memory_region_is_ram(mr)
1375 5c8a00ce Paolo Bonzini
          || memory_region_is_romd(mr))) {
1376 5b6dd868 Blue Swirl
        return;
1377 5b6dd868 Blue Swirl
    }
1378 5c8a00ce Paolo Bonzini
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
1379 149f54b5 Paolo Bonzini
        + addr;
1380 5b6dd868 Blue Swirl
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1381 5b6dd868 Blue Swirl
}
1382 5b6dd868 Blue Swirl
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
1383 5b6dd868 Blue Swirl
1384 5b6dd868 Blue Swirl
/* Restore precise CPU state at the access that hit a watchpoint
   (env->mem_io_pc) and invalidate the TB containing it so the code is
   retranslated.  Aborts if no TB covers that host PC.  */
void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    /* Recover guest registers/PC for the faulting instruction, then
       drop the TB.  */
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
1396 5b6dd868 Blue Swirl
1397 5b6dd868 Blue Swirl
#ifndef CONFIG_USER_ONLY
1398 5b6dd868 Blue Swirl
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    CPUArchState *env = cpu->env_ptr;
    int old_mask;

    /* Record the previous pending set so we can detect newly raised
       interrupts below.  */
    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        /* Make the instruction-count budget expire immediately so the
           execution loop notices the interrupt.  */
        env->icount_decr.u16.high = 0xffff;
        /* In deterministic (icount) mode a new interrupt may only be
           raised from within an I/O instruction.  */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        /* Ask the TCG execution loop to exit.  */
        cpu->tcg_exit_req = 1;
    }
}

/* Function pointer through which interrupts are delivered; defaults to
   the TCG handler above.  NOTE(review): presumably replaceable by other
   accelerators -- confirm against callers of cpu_interrupt_handler.  */
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1428 5b6dd868 Blue Swirl
1429 5b6dd868 Blue Swirl
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    /* Instruction budget at TB entry: what remains now plus what this
       TB was going to consume.  */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    /* Retranslate the same guest code limited to n instructions, with
       the last one flagged as allowed to perform I/O.  */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
1488 5b6dd868 Blue Swirl
1489 5b6dd868 Blue Swirl
/* Drop jump-cache entries that could refer to TBs overlapping the
   flushed page at 'addr'.  */
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    target_ulong pages[2];
    int j;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page: the page itself and the one before it
       (a TB may extend across a page boundary).  */
    pages[0] = addr - TARGET_PAGE_SIZE;
    pages[1] = addr;

    for (j = 0; j < 2; j++) {
        unsigned int i = tb_jmp_cache_hash_page(pages[j]);

        memset(&env->tb_jmp_cache[i], 0,
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    }
}
1503 5b6dd868 Blue Swirl
1504 5b6dd868 Blue Swirl
/* Print translation-buffer statistics (code size, TB counts, cross-page
   and direct-jump ratios, flush counters) to 'f' using cpu_fprintf,
   then append the TCG backend's own statistics.  */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* First pass: accumulate per-TB statistics.  */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        /* page_addr[1] != -1 means the TB spans two pages.  */
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        /* tb_next_offset[] == 0xffff means the jump slot is unused.  */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                             target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
1566 5b6dd868 Blue Swirl
1567 5b6dd868 Blue Swirl
#else /* CONFIG_USER_ONLY */
1568 5b6dd868 Blue Swirl
1569 c3affe56 Andreas Fรคrber
/* User-mode variant: record the pending interrupt bits and ask the TCG
   execution loop to exit so the request is noticed.  */
void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
1574 5b6dd868 Blue Swirl
1575 5b6dd868 Blue Swirl
/*
1576 5b6dd868 Blue Swirl
 * Walks guest process memory "regions" one by one
1577 5b6dd868 Blue Swirl
 * and calls callback function 'fn' for each region.
1578 5b6dd868 Blue Swirl
 */
1579 5b6dd868 Blue Swirl
/* State threaded through the page-table walk while coalescing runs of
   pages with identical protection into regions.  */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;  /* callback invoked once per region */
    void *priv;                 /* opaque argument passed to fn */
    uintptr_t start;            /* start of the open region, -1ul if none */
    int prot;                   /* protection flags of the open region */
};
1585 5b6dd868 Blue Swirl
1586 5b6dd868 Blue Swirl
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1587 5b6dd868 Blue Swirl
                                   abi_ulong end, int new_prot)
1588 5b6dd868 Blue Swirl
{
1589 5b6dd868 Blue Swirl
    if (data->start != -1ul) {
1590 5b6dd868 Blue Swirl
        int rc = data->fn(data->priv, data->start, end, data->prot);
1591 5b6dd868 Blue Swirl
        if (rc != 0) {
1592 5b6dd868 Blue Swirl
            return rc;
1593 5b6dd868 Blue Swirl
        }
1594 5b6dd868 Blue Swirl
    }
1595 5b6dd868 Blue Swirl
1596 5b6dd868 Blue Swirl
    data->start = (new_prot ? end : -1ul);
1597 5b6dd868 Blue Swirl
    data->prot = new_prot;
1598 5b6dd868 Blue Swirl
1599 5b6dd868 Blue Swirl
    return 0;
1600 5b6dd868 Blue Swirl
}
1601 5b6dd868 Blue Swirl
1602 5b6dd868 Blue Swirl
/* Recursively walk one node of the radix page table.  'base' is the
   guest address covered by *lp; 'level' is the number of interior
   levels below this node (0 means *lp is a leaf PageDesc array).
   Emits a region boundary whenever the protection flags change.
   Returns the first non-zero callback result, otherwise 0.  */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Hole in the page table: close any open region at 'base'.  */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            /* Only flag changes produce region boundaries; identical
               consecutive pages are coalesced.  */
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            /* Each child at this level covers L2_BITS * level worth of
               address bits above the page offset.  */
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
1641 5b6dd868 Blue Swirl
1642 5b6dd868 Blue Swirl
/* Walk all guest memory "regions" (maximal runs of pages with identical
   protection flags) and invoke 'fn' once per region with 'priv' as its
   opaque argument.  Returns the first non-zero value returned by 'fn',
   otherwise the result of closing the final region.  */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;   /* no region open yet */
    data.prot = 0;

    /* Iterate over the top level of the radix page table.  */
    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    /* Close the last open region, if any.  */
    return walk_memory_regions_end(&data, 0, 0);
}
1663 5b6dd868 Blue Swirl
1664 5b6dd868 Blue Swirl
/* walk_memory_regions() callback for page_dump(): print one region as a
   "start-end size rwx" line to the FILE passed via 'priv'.  Always
   returns 0 so the walk continues.  */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;
    char r = (prot & PAGE_READ) ? 'r' : '-';
    char w = (prot & PAGE_WRITE) ? 'w' : '-';
    char x = (prot & PAGE_EXEC) ? 'x' : '-';

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start, r, w, x);

    return 0;
}
1678 5b6dd868 Blue Swirl
1679 5b6dd868 Blue Swirl
/* dump memory mappings */
void page_dump(FILE *f)
{
    /* Header line, then one dump_region() line per coalesced region.  */
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
1686 5b6dd868 Blue Swirl
1687 5b6dd868 Blue Swirl
/* Return the PAGE_* flags of the page containing 'address', or 0 when
   no descriptor exists for that page.  */
int page_get_flags(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);

    return p ? p->flags : 0;
}
1697 5b6dd868 Blue Swirl
1698 5b6dd868 Blue Swirl
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Record that the page is fundamentally writable, so it can be
           temporarily write-protected and later restored.  */
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
1735 5b6dd868 Blue Swirl
1736 5b6dd868 Blue Swirl
/* Check that every page in [start, start+len) is valid and allows the
   accesses requested in 'flags' (PAGE_READ / PAGE_WRITE).  Pages that
   were write-protected only because they contain translated code are
   unprotected on demand.  Returns 0 on success, -1 on failure.  */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* Bug fix: the previous code returned 0 here, so only the
               FIRST page of a multi-page range was ever checked.  Keep
               iterating so the whole range is validated.  */
        }
    }
    return 0;
}
1791 5b6dd868 Blue Swirl
1792 5b6dd868 Blue Swirl
/* called from signal handler: invalidate the code and unprotect the
1793 5b6dd868 Blue Swirl
   page. Return TRUE if the fault was successfully handled. */
1794 5b6dd868 Blue Swirl
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
1795 5b6dd868 Blue Swirl
{
1796 5b6dd868 Blue Swirl
    unsigned int prot;
1797 5b6dd868 Blue Swirl
    PageDesc *p;
1798 5b6dd868 Blue Swirl
    target_ulong host_start, host_end, addr;
1799 5b6dd868 Blue Swirl
1800 5b6dd868 Blue Swirl
    /* Technically this isn't safe inside a signal handler.  However we
1801 5b6dd868 Blue Swirl
       know this only ever happens in a synchronous SEGV handler, so in
1802 5b6dd868 Blue Swirl
       practice it seems to be ok.  */
1803 5b6dd868 Blue Swirl
    mmap_lock();
1804 5b6dd868 Blue Swirl
1805 5b6dd868 Blue Swirl
    p = page_find(address >> TARGET_PAGE_BITS);
1806 5b6dd868 Blue Swirl
    if (!p) {
1807 5b6dd868 Blue Swirl
        mmap_unlock();
1808 5b6dd868 Blue Swirl
        return 0;
1809 5b6dd868 Blue Swirl
    }
1810 5b6dd868 Blue Swirl
1811 5b6dd868 Blue Swirl
    /* if the page was really writable, then we change its
1812 5b6dd868 Blue Swirl
       protection back to writable */
1813 5b6dd868 Blue Swirl
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
1814 5b6dd868 Blue Swirl
        host_start = address & qemu_host_page_mask;
1815 5b6dd868 Blue Swirl
        host_end = host_start + qemu_host_page_size;
1816 5b6dd868 Blue Swirl
1817 5b6dd868 Blue Swirl
        prot = 0;
1818 5b6dd868 Blue Swirl
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
1819 5b6dd868 Blue Swirl
            p = page_find(addr >> TARGET_PAGE_BITS);
1820 5b6dd868 Blue Swirl
            p->flags |= PAGE_WRITE;
1821 5b6dd868 Blue Swirl
            prot |= p->flags;
1822 5b6dd868 Blue Swirl
1823 5b6dd868 Blue Swirl
            /* and since the content will be modified, we must invalidate
1824 5b6dd868 Blue Swirl
               the corresponding translated code. */
1825 d02532f0 Alexander Graf
            tb_invalidate_phys_page(addr, pc, puc, true);
1826 5b6dd868 Blue Swirl
#ifdef DEBUG_TB_CHECK
1827 5b6dd868 Blue Swirl
            tb_invalidate_check(addr);
1828 5b6dd868 Blue Swirl
#endif
1829 5b6dd868 Blue Swirl
        }
1830 5b6dd868 Blue Swirl
        mprotect((void *)g2h(host_start), qemu_host_page_size,
1831 5b6dd868 Blue Swirl
                 prot & PAGE_BITS);
1832 5b6dd868 Blue Swirl
1833 5b6dd868 Blue Swirl
        mmap_unlock();
1834 5b6dd868 Blue Swirl
        return 1;
1835 5b6dd868 Blue Swirl
    }
1836 5b6dd868 Blue Swirl
    mmap_unlock();
1837 5b6dd868 Blue Swirl
    return 0;
1838 5b6dd868 Blue Swirl
}
1839 5b6dd868 Blue Swirl
#endif /* CONFIG_USER_ONLY */