Statistics
| Branch: | Revision:

root / translate-all.c @ c09b437b

History | View | Annotate | Download (53.5 kB)

1 d19893da bellard
/*
2 d19893da bellard
 *  Host code generation
3 5fafdf24 ths
 *
4 d19893da bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 d19893da bellard
 *
6 d19893da bellard
 * This library is free software; you can redistribute it and/or
7 d19893da bellard
 * modify it under the terms of the GNU Lesser General Public
8 d19893da bellard
 * License as published by the Free Software Foundation; either
9 d19893da bellard
 * version 2 of the License, or (at your option) any later version.
10 d19893da bellard
 *
11 d19893da bellard
 * This library is distributed in the hope that it will be useful,
12 d19893da bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 d19893da bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 d19893da bellard
 * Lesser General Public License for more details.
15 d19893da bellard
 *
16 d19893da bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 d19893da bellard
 */
19 5b6dd868 Blue Swirl
#ifdef _WIN32
20 5b6dd868 Blue Swirl
#include <windows.h>
21 5b6dd868 Blue Swirl
#else
22 5b6dd868 Blue Swirl
#include <sys/types.h>
23 5b6dd868 Blue Swirl
#include <sys/mman.h>
24 5b6dd868 Blue Swirl
#endif
25 d19893da bellard
#include <stdarg.h>
26 d19893da bellard
#include <stdlib.h>
27 d19893da bellard
#include <stdio.h>
28 d19893da bellard
#include <string.h>
29 d19893da bellard
#include <inttypes.h>
30 d19893da bellard
31 d19893da bellard
#include "config.h"
32 2054396a bellard
33 5b6dd868 Blue Swirl
#include "qemu-common.h"
34 af5ad107 bellard
#define NO_CPU_IO_DEFS
35 d3eead2e bellard
#include "cpu.h"
36 76cad711 Paolo Bonzini
#include "disas/disas.h"
37 57fec1fe bellard
#include "tcg.h"
38 1de7afc9 Paolo Bonzini
#include "qemu/timer.h"
39 022c62cb Paolo Bonzini
#include "exec/memory.h"
40 022c62cb Paolo Bonzini
#include "exec/address-spaces.h"
41 5b6dd868 Blue Swirl
#if defined(CONFIG_USER_ONLY)
42 5b6dd868 Blue Swirl
#include "qemu.h"
43 5b6dd868 Blue Swirl
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
44 5b6dd868 Blue Swirl
#include <sys/param.h>
45 5b6dd868 Blue Swirl
#if __FreeBSD_version >= 700104
46 5b6dd868 Blue Swirl
#define HAVE_KINFO_GETVMMAP
47 5b6dd868 Blue Swirl
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
48 5b6dd868 Blue Swirl
#include <sys/time.h>
49 5b6dd868 Blue Swirl
#include <sys/proc.h>
50 5b6dd868 Blue Swirl
#include <machine/profile.h>
51 5b6dd868 Blue Swirl
#define _KERNEL
52 5b6dd868 Blue Swirl
#include <sys/user.h>
53 5b6dd868 Blue Swirl
#undef _KERNEL
54 5b6dd868 Blue Swirl
#undef sigqueue
55 5b6dd868 Blue Swirl
#include <libutil.h>
56 5b6dd868 Blue Swirl
#endif
57 5b6dd868 Blue Swirl
#endif
58 5b6dd868 Blue Swirl
#endif
59 5b6dd868 Blue Swirl
60 022c62cb Paolo Bonzini
#include "exec/cputlb.h"
61 5b6dd868 Blue Swirl
#include "translate-all.h"
62 5b6dd868 Blue Swirl
63 5b6dd868 Blue Swirl
//#define DEBUG_TB_INVALIDATE
64 5b6dd868 Blue Swirl
//#define DEBUG_FLUSH
65 5b6dd868 Blue Swirl
/* make various TB consistency checks */
66 5b6dd868 Blue Swirl
//#define DEBUG_TB_CHECK
67 5b6dd868 Blue Swirl
68 5b6dd868 Blue Swirl
#if !defined(CONFIG_USER_ONLY)
69 5b6dd868 Blue Swirl
/* TB consistency checks only implemented for usermode emulation.  */
70 5b6dd868 Blue Swirl
#undef DEBUG_TB_CHECK
71 5b6dd868 Blue Swirl
#endif
72 5b6dd868 Blue Swirl
73 5b6dd868 Blue Swirl
#define SMC_BITMAP_USE_THRESHOLD 10
74 5b6dd868 Blue Swirl
75 5b6dd868 Blue Swirl
typedef struct PageDesc {
76 5b6dd868 Blue Swirl
    /* list of TBs intersecting this ram page */
77 5b6dd868 Blue Swirl
    TranslationBlock *first_tb;
78 5b6dd868 Blue Swirl
    /* in order to optimize self modifying code, we count the number
79 5b6dd868 Blue Swirl
       of lookups we do to a given page to use a bitmap */
80 5b6dd868 Blue Swirl
    unsigned int code_write_count;
81 5b6dd868 Blue Swirl
    uint8_t *code_bitmap;
82 5b6dd868 Blue Swirl
#if defined(CONFIG_USER_ONLY)
83 5b6dd868 Blue Swirl
    unsigned long flags;
84 5b6dd868 Blue Swirl
#endif
85 5b6dd868 Blue Swirl
} PageDesc;
86 5b6dd868 Blue Swirl
87 5b6dd868 Blue Swirl
/* In system mode we want L1_MAP to be based on ram offsets,
88 5b6dd868 Blue Swirl
   while in user mode we want it to be based on virtual addresses.  */
89 5b6dd868 Blue Swirl
#if !defined(CONFIG_USER_ONLY)
90 5b6dd868 Blue Swirl
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
91 5b6dd868 Blue Swirl
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
92 5b6dd868 Blue Swirl
#else
93 5b6dd868 Blue Swirl
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
94 5b6dd868 Blue Swirl
#endif
95 5b6dd868 Blue Swirl
#else
96 5b6dd868 Blue Swirl
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
97 5b6dd868 Blue Swirl
#endif
98 5b6dd868 Blue Swirl
99 5b6dd868 Blue Swirl
/* The bits remaining after N lower levels of page tables.  */
100 5b6dd868 Blue Swirl
#define V_L1_BITS_REM \
101 5b6dd868 Blue Swirl
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
102 5b6dd868 Blue Swirl
103 5b6dd868 Blue Swirl
#if V_L1_BITS_REM < 4
104 5b6dd868 Blue Swirl
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
105 5b6dd868 Blue Swirl
#else
106 5b6dd868 Blue Swirl
#define V_L1_BITS  V_L1_BITS_REM
107 5b6dd868 Blue Swirl
#endif
108 5b6dd868 Blue Swirl
109 5b6dd868 Blue Swirl
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
110 5b6dd868 Blue Swirl
111 5b6dd868 Blue Swirl
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
112 5b6dd868 Blue Swirl
113 5b6dd868 Blue Swirl
uintptr_t qemu_real_host_page_size;
114 5b6dd868 Blue Swirl
uintptr_t qemu_host_page_size;
115 5b6dd868 Blue Swirl
uintptr_t qemu_host_page_mask;
116 5b6dd868 Blue Swirl
117 5b6dd868 Blue Swirl
/* This is a multi-level map on the virtual address space.
118 5b6dd868 Blue Swirl
   The bottom level has pointers to PageDesc.  */
119 5b6dd868 Blue Swirl
static void *l1_map[V_L1_SIZE];
120 5b6dd868 Blue Swirl
121 57fec1fe bellard
/* code generation context */
122 57fec1fe bellard
TCGContext tcg_ctx;
123 d19893da bellard
124 5b6dd868 Blue Swirl
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
125 5b6dd868 Blue Swirl
                         tb_page_addr_t phys_page2);
126 a8a826a3 Blue Swirl
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
127 5b6dd868 Blue Swirl
128 57fec1fe bellard
void cpu_gen_init(void)
129 57fec1fe bellard
{
130 57fec1fe bellard
    tcg_context_init(&tcg_ctx); 
131 57fec1fe bellard
}
132 57fec1fe bellard
133 d19893da bellard
/* Generate host code for translation block 'tb'.

   The guest code is first translated to the TCG intermediate
   representation, then host machine code is emitted into tb->tc_ptr.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).

   The return value is always 0 in the current implementation; callers
   need not check it.
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    /* Guest code -> TCG intermediate ops. */
    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    /* NOTE(review): 0xffff appears to mark the two direct-jump slots as
       "not yet patched" — confirm against the TCG goto_tb handling. */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    /* Direct jumps: TCG records the jump instruction offsets for later
       patching; no indirect jump target table is needed. */
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    /* Indirect jumps through tb->tb_next. */
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    /* TCG intermediate ops -> host machine code. */
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    /* Optionally dump the generated host code to the QEMU log. */
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
193 d19893da bellard
194 5fafdf24 ths
/* The cpu state corresponding to 'searched_pc' is restored.

   The block is re-translated to regenerate the per-opcode metadata
   arrays (gen_opc_*), the opcode index covering 'searched_pc' is
   located, and restore_state_to_opc() applies the guest state recorded
   for that opcode.

   Returns 0 on success, -1 if 'searched_pc' does not fall inside the
   generated host code of 'tb'.
 */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    /* Re-translate the block to rebuild the search metadata. */
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr)
        return -1;

    /* Mirror the jump-target setup used during the original translation
       so the re-generated code layout matches. */
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0)
        return -1;
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    /* Credit back the icount budget of the not-yet-executed tail. */
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
250 5b6dd868 Blue Swirl
251 a8a826a3 Blue Swirl
/* Restore the CPU state for a fault that occurred at host code address
   'retaddr'.  Returns true if a translation block covering 'retaddr'
   was found and the state was restored, false otherwise. */
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb = tb_find_pc(retaddr);

    if (tb == NULL) {
        return false;
    }
    cpu_restore_state_from_tb(tb, env, retaddr);
    return true;
}
262 a8a826a3 Blue Swirl
263 5b6dd868 Blue Swirl
#ifdef _WIN32
/* Make 'size' bytes starting at 'addr' readable, writable and
   executable. */
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make 'size' bytes starting at 'addr' readable, writable and
   executable.  The range is widened to whole host pages, as required
   by mprotect(). */
static inline void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long start = (unsigned long)addr & ~(page_size - 1);
    unsigned long end = ((unsigned long)addr + size + page_size - 1)
                        & ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
287 5b6dd868 Blue Swirl
288 5b6dd868 Blue Swirl
/* Initialise the host page size globals and, for BSD user-mode
   emulation, mark address ranges already in use by the host process as
   PAGE_RESERVED so the guest cannot map over them. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been set elsewhere; only default it
       if still unset, then clamp to at least the guest page size. */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 7.0.104: walk the process map via libutil. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* Range end is outside the guest address space:
                           reserve up to the top of the guest space when
                           it spans the whole map. */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
375 5b6dd868 Blue Swirl
376 5b6dd868 Blue Swirl
/* Return the PageDesc for guest page index 'index', walking the
   multi-level l1_map.  If 'alloc' is non-zero, missing intermediate
   tables and the final PageDesc array are allocated (zero-filled:
   anonymous mmap or g_malloc0); otherwise NULL is returned as soon as
   any level is missing. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of PageDesc. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
425 5b6dd868 Blue Swirl
426 5b6dd868 Blue Swirl
/* Look up the PageDesc for 'index'; returns NULL if the page has no
   descriptor yet (never allocates). */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
430 5b6dd868 Blue Swirl
431 5b6dd868 Blue Swirl
#if !defined(CONFIG_USER_ONLY)
432 5b6dd868 Blue Swirl
#define mmap_lock() do { } while (0)
433 5b6dd868 Blue Swirl
#define mmap_unlock() do { } while (0)
434 5b6dd868 Blue Swirl
#endif
435 5b6dd868 Blue Swirl
436 5b6dd868 Blue Swirl
#if defined(CONFIG_USER_ONLY)
437 5b6dd868 Blue Swirl
/* Currently it is not recommended to allocate big chunks of data in
438 5b6dd868 Blue Swirl
   user mode. It will change when a dedicated libc will be used.  */
439 5b6dd868 Blue Swirl
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
440 5b6dd868 Blue Swirl
   region in which the guest needs to run.  Revisit this.  */
441 5b6dd868 Blue Swirl
#define USE_STATIC_CODE_GEN_BUFFER
442 5b6dd868 Blue Swirl
#endif
443 5b6dd868 Blue Swirl
444 5b6dd868 Blue Swirl
/* ??? Should configure for this, not list operating systems here.  */
445 5b6dd868 Blue Swirl
#if (defined(__linux__) \
446 5b6dd868 Blue Swirl
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
447 5b6dd868 Blue Swirl
    || defined(__DragonFly__) || defined(__OpenBSD__) \
448 5b6dd868 Blue Swirl
    || defined(__NetBSD__))
449 5b6dd868 Blue Swirl
# define USE_MMAP
450 5b6dd868 Blue Swirl
#endif
451 5b6dd868 Blue Swirl
452 5b6dd868 Blue Swirl
/* Minimum size of the code gen buffer.  This number is randomly chosen,
453 5b6dd868 Blue Swirl
   but not so small that we can't have a fair number of TB's live.  */
454 5b6dd868 Blue Swirl
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
455 5b6dd868 Blue Swirl
456 5b6dd868 Blue Swirl
/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
457 5b6dd868 Blue Swirl
   indicated, this is constrained by the range of direct branches on the
458 5b6dd868 Blue Swirl
   host cpu, as used by the TCG implementation of goto_tb.  */
459 5b6dd868 Blue Swirl
#if defined(__x86_64__)
460 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
461 5b6dd868 Blue Swirl
#elif defined(__sparc__)
462 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
463 5b6dd868 Blue Swirl
#elif defined(__arm__)
464 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
465 5b6dd868 Blue Swirl
#elif defined(__s390x__)
466 5b6dd868 Blue Swirl
  /* We have a +- 4GB range on the branches; leave some slop.  */
467 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
468 5b6dd868 Blue Swirl
#else
469 5b6dd868 Blue Swirl
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
470 5b6dd868 Blue Swirl
#endif
471 5b6dd868 Blue Swirl
472 5b6dd868 Blue Swirl
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
473 5b6dd868 Blue Swirl
474 5b6dd868 Blue Swirl
#define DEFAULT_CODE_GEN_BUFFER_SIZE \
475 5b6dd868 Blue Swirl
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
476 5b6dd868 Blue Swirl
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
477 5b6dd868 Blue Swirl
478 5b6dd868 Blue Swirl
/* Decide the size of the code generation buffer.  A 'tb_size' of zero
   selects the default; the result is clamped to the supported range,
   stored in tcg_ctx.code_gen_buffer_size, and returned. */
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    size_t size = tb_size;

    if (size == 0) {
        /* No explicit size requested: choose a default. */
#ifdef USE_STATIC_CODE_GEN_BUFFER
        size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        size = (unsigned long)(ram_size / 4);
#endif
    }
    /* Clamp to the range supported by the host TCG backend. */
    if (size < MIN_CODE_GEN_BUFFER_SIZE) {
        size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (size > MAX_CODE_GEN_BUFFER_SIZE) {
        size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = size;
    return size;
}
501 5b6dd868 Blue Swirl
502 5b6dd868 Blue Swirl
#ifdef USE_STATIC_CODE_GEN_BUFFER
/* Statically allocated code buffer (user-mode default). */
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

/* Return the static buffer after making it executable. */
static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
/* mmap an anonymous RWX region for generated code.  Returns NULL on
   failure.  May shrink tcg_ctx.code_gen_buffer_size to satisfy host
   placement constraints. */
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
/* Fallback: heap-allocate the buffer and flip it to executable. */
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
555 5b6dd868 Blue Swirl
556 5b6dd868 Blue Swirl
/* Allocate and lay out the translation code buffer and the TB array.
   Exits the process if the buffer cannot be allocated. */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Hint to the host that huge pages are welcome for the buffer. */
    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
            QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    /* Keep headroom so a single maximally-sized TB cannot overrun the
       buffer end. */
    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
584 5b6dd868 Blue Swirl
585 5b6dd868 Blue Swirl
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    /* Start emitting code at the beginning of the fresh buffer. */
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* Publish the generated-code region via the JIT registration hook. */
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
601 5b6dd868 Blue Swirl
602 5b6dd868 Blue Swirl
bool tcg_enabled(void)
603 5b6dd868 Blue Swirl
{
604 0b0d3320 Evgeny Voevodin
    return tcg_ctx.code_gen_buffer != NULL;
605 5b6dd868 Blue Swirl
}
606 5b6dd868 Blue Swirl
607 5b6dd868 Blue Swirl
/* Allocate a new translation block. Flush the translation buffer if
608 5b6dd868 Blue Swirl
   too many translation blocks or too much generated code. */
609 5b6dd868 Blue Swirl
static TranslationBlock *tb_alloc(target_ulong pc)
610 5b6dd868 Blue Swirl
{
611 5b6dd868 Blue Swirl
    TranslationBlock *tb;
612 5b6dd868 Blue Swirl
613 5e5f07e0 Evgeny Voevodin
    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
614 0b0d3320 Evgeny Voevodin
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
615 0b0d3320 Evgeny Voevodin
         tcg_ctx.code_gen_buffer_max_size) {
616 5b6dd868 Blue Swirl
        return NULL;
617 5b6dd868 Blue Swirl
    }
618 5e5f07e0 Evgeny Voevodin
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
619 5b6dd868 Blue Swirl
    tb->pc = pc;
620 5b6dd868 Blue Swirl
    tb->cflags = 0;
621 5b6dd868 Blue Swirl
    return tb;
622 5b6dd868 Blue Swirl
}
623 5b6dd868 Blue Swirl
624 5b6dd868 Blue Swirl
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    int last = tcg_ctx.tb_ctx.nb_tbs - 1;

    if (last >= 0 && tb == &tcg_ctx.tb_ctx.tbs[last]) {
        /* Reclaim both the TB slot and its generated code. */
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs = last;
    }
}
635 5b6dd868 Blue Swirl
636 5b6dd868 Blue Swirl
/* Drop the SMC code bitmap of a page and reset its write counter so a
   fresh bitmap can be rebuilt on later code writes. */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    /* g_free(NULL) is a documented no-op, so the previous
       'if (p->code_bitmap)' guard was redundant. */
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
}
644 5b6dd868 Blue Swirl
645 5b6dd868 Blue Swirl
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
646 5b6dd868 Blue Swirl
static void page_flush_tb_1(int level, void **lp)
647 5b6dd868 Blue Swirl
{
648 5b6dd868 Blue Swirl
    int i;
649 5b6dd868 Blue Swirl
650 5b6dd868 Blue Swirl
    if (*lp == NULL) {
651 5b6dd868 Blue Swirl
        return;
652 5b6dd868 Blue Swirl
    }
653 5b6dd868 Blue Swirl
    if (level == 0) {
654 5b6dd868 Blue Swirl
        PageDesc *pd = *lp;
655 5b6dd868 Blue Swirl
656 5b6dd868 Blue Swirl
        for (i = 0; i < L2_SIZE; ++i) {
657 5b6dd868 Blue Swirl
            pd[i].first_tb = NULL;
658 5b6dd868 Blue Swirl
            invalidate_page_bitmap(pd + i);
659 5b6dd868 Blue Swirl
        }
660 5b6dd868 Blue Swirl
    } else {
661 5b6dd868 Blue Swirl
        void **pp = *lp;
662 5b6dd868 Blue Swirl
663 5b6dd868 Blue Swirl
        for (i = 0; i < L2_SIZE; ++i) {
664 5b6dd868 Blue Swirl
            page_flush_tb_1(level - 1, pp + i);
665 5b6dd868 Blue Swirl
        }
666 5b6dd868 Blue Swirl
    }
667 5b6dd868 Blue Swirl
}
668 5b6dd868 Blue Swirl
669 5b6dd868 Blue Swirl
static void page_flush_tb(void)
670 5b6dd868 Blue Swirl
{
671 5b6dd868 Blue Swirl
    int i;
672 5b6dd868 Blue Swirl
673 5b6dd868 Blue Swirl
    for (i = 0; i < V_L1_SIZE; i++) {
674 5b6dd868 Blue Swirl
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
675 5b6dd868 Blue Swirl
    }
676 5b6dd868 Blue Swirl
}
677 5b6dd868 Blue Swirl
678 5b6dd868 Blue Swirl
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    /* Sanity check: generated code must never have run past the buffer. */
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    /* Discard every TB: resetting the count invalidates the tbs[] array. */
    tcg_ctx.tb_ctx.nb_tbs = 0;

    /* Clear the per-CPU virtual-PC lookup caches. */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    /* Clear the global physical-PC hash table and per-page TB lists. */
    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
            CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    /* Reuse the code buffer from the start. */
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
710 5b6dd868 Blue Swirl
711 5b6dd868 Blue Swirl
#ifdef DEBUG_TB_CHECK

/* Consistency check: report any TB that still overlaps the page of the
   just-invalidated address. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        /* Fix: the hash table lives in tcg_ctx.tb_ctx (a bare 'tb_ctx'
           is not in scope), matching tb_page_check() below. */
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
730 5b6dd868 Blue Swirl
731 5b6dd868 Blue Swirl
/* verify that all the pages have correct rights for code */
732 5b6dd868 Blue Swirl
static void tb_page_check(void)
733 5b6dd868 Blue Swirl
{
734 5b6dd868 Blue Swirl
    TranslationBlock *tb;
735 5b6dd868 Blue Swirl
    int i, flags1, flags2;
736 5b6dd868 Blue Swirl
737 5b6dd868 Blue Swirl
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
738 5e5f07e0 Evgeny Voevodin
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
739 5e5f07e0 Evgeny Voevodin
                tb = tb->phys_hash_next) {
740 5b6dd868 Blue Swirl
            flags1 = page_get_flags(tb->pc);
741 5b6dd868 Blue Swirl
            flags2 = page_get_flags(tb->pc + tb->size - 1);
742 5b6dd868 Blue Swirl
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
743 5b6dd868 Blue Swirl
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
744 5b6dd868 Blue Swirl
                       (long)tb->pc, tb->size, flags1, flags2);
745 5b6dd868 Blue Swirl
            }
746 5b6dd868 Blue Swirl
        }
747 5b6dd868 Blue Swirl
    }
748 5b6dd868 Blue Swirl
}
749 5b6dd868 Blue Swirl
750 5b6dd868 Blue Swirl
#endif
751 5b6dd868 Blue Swirl
752 0c884d16 ้™ณ้Ÿ‹ไปป (Wei-Ren Chen)
/* Unlink 'tb' from the singly linked physical-hash chain rooted at
   *ptb.  Like the original, this assumes 'tb' is present on the list. */
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    while (*ptb != tb) {
        ptb = &(*ptb)->phys_hash_next;
    }
    *ptb = tb->phys_hash_next;
}
765 5b6dd868 Blue Swirl
766 5b6dd868 Blue Swirl
/* Unlink 'tb' from a per-page TB list.  The page_next pointers carry
   the page index in their low two bits, which must be masked off
   before comparing and used to pick the next-pointer slot. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *cur;
    unsigned int tag;

    for (;;) {
        cur = *ptb;
        tag = (uintptr_t)cur & 3;
        cur = (TranslationBlock *)((uintptr_t)cur & ~3);
        if (cur == tb) {
            *ptb = cur->page_next[tag];
            return;
        }
        ptb = &cur->page_next[tag];
    }
}
782 5b6dd868 Blue Swirl
783 5b6dd868 Blue Swirl
/* Unlink jump slot 'n' of 'tb' from the circular list of TBs that jump
   into the same target; no-op when the slot is not chained. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            /* low 2 bits of each pointer encode the jump slot index;
               the value 2 marks the list head (jmp_first). */
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
811 5b6dd868 Blue Swirl
812 5b6dd868 Blue Swirl
/* reset the jump entry 'n' of a TB so that it is not chained to
813 5b6dd868 Blue Swirl
   another TB */
814 5b6dd868 Blue Swirl
static inline void tb_reset_jump(TranslationBlock *tb, int n)
815 5b6dd868 Blue Swirl
{
816 5b6dd868 Blue Swirl
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
817 5b6dd868 Blue Swirl
}
818 5b6dd868 Blue Swirl
819 0c884d16 ้™ณ้Ÿ‹ไปป (Wei-Ren Chen)
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list(s); the page matching
       'page_addr' is skipped, its list is handled by the caller */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* second page is only valid when the TB spans two pages (-1 otherwise) */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU virtual-PC caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        /* low 2 bits tag the jump slot; 2 marks the end of the list */
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* redirect the incoming jump back into its own TB */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
876 5b6dd868 Blue Swirl
877 5b6dd868 Blue Swirl
/* Set 'len' consecutive bits starting at bit index 'start' in the
   bitmap 'tab' (bit i lives in byte i/8, LSB-first). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* whole range lies within a single byte */
        if (start < end) {
            *p |= head_mask & ~(0xff << (end & 7));
        }
        return;
    }

    /* partial leading byte */
    *p++ |= head_mask;
    /* full bytes in the middle */
    for (start = (start + 8) & ~7; start < (end & ~7); start += 8) {
        *p++ = 0xff;
    }
    /* partial trailing byte, if any */
    if (start < end) {
        *p |= ~(0xff << (end & 7));
    }
}
903 5b6dd868 Blue Swirl
904 5b6dd868 Blue Swirl
/* Build the SMC bitmap of a page: one bit per byte of the page,
   set for every byte covered by a TB on the page's list. */
static void build_page_bitmap(PageDesc *p)
{
    TranslationBlock *tb;
    int tag, bit_start, bit_end;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    for (tb = p->first_tb; tb != NULL; tb = tb->page_next[tag]) {
        tag = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (tag == 0) {
            /* TB starts in this page; clamp its end to the page.
               NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            bit_start = tb->pc & ~TARGET_PAGE_MASK;
            bit_end = bit_start + tb->size;
            if (bit_end > TARGET_PAGE_SIZE) {
                bit_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* TB started on the previous page; covers this page's head */
            bit_start = 0;
            bit_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, bit_start, bit_end - bit_start);
    }
}
932 5b6dd868 Blue Swirl
933 5b6dd868 Blue Swirl
/* Translate the guest code at 'pc' into a new TranslationBlock and
   link it into the physical page tables.  Flushes all TBs when the
   TB array or code buffer is full. */
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    /* generated host code goes at the current code-buffer cursor */
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the cursor past the emitted code, rounded up to
       CODE_GEN_ALIGN */
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
971 5b6dd868 Blue Swirl
972 5b6dd868 Blue Swirl
/*
973 5b6dd868 Blue Swirl
 * Invalidate all TBs which intersect with the target physical address range
974 5b6dd868 Blue Swirl
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
975 5b6dd868 Blue Swirl
 * 'is_cpu_write_access' should be true if called from a real cpu write
976 5b6dd868 Blue Swirl
 * access: the virtual CPU will exit the current TB if code is modified inside
977 5b6dd868 Blue Swirl
 * this TB.
978 5b6dd868 Blue Swirl
 */
979 5b6dd868 Blue Swirl
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
980 5b6dd868 Blue Swirl
                              int is_cpu_write_access)
981 5b6dd868 Blue Swirl
{
982 5b6dd868 Blue Swirl
    while (start < end) {
983 5b6dd868 Blue Swirl
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
984 5b6dd868 Blue Swirl
        start &= TARGET_PAGE_MASK;
985 5b6dd868 Blue Swirl
        start += TARGET_PAGE_SIZE;
986 5b6dd868 Blue Swirl
    }
987 5b6dd868 Blue Swirl
}
988 5b6dd868 Blue Swirl
989 5b6dd868 Blue Swirl
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    CPUState *cpu = NULL;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    /* after enough write faults on this page, start tracking which
       bytes hold code so later writes can be filtered cheaply */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (env != NULL) {
        cpu = ENV_GET_CPU(env);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits tag which of the TB's (up to two) pages this is */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                /* re-deliver any interrupt that arrived while
                   current_tb was cleared */
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1107 5b6dd868 Blue Swirl
1108 5b6dd868 Blue Swirl
/* len must be <= 8 and start must be a multiple of len */
1109 5b6dd868 Blue Swirl
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1110 5b6dd868 Blue Swirl
{
1111 5b6dd868 Blue Swirl
    PageDesc *p;
1112 5b6dd868 Blue Swirl
    int offset, b;
1113 5b6dd868 Blue Swirl
1114 5b6dd868 Blue Swirl
#if 0
1115 5b6dd868 Blue Swirl
    if (1) {
1116 5b6dd868 Blue Swirl
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1117 5b6dd868 Blue Swirl
                  cpu_single_env->mem_io_vaddr, len,
1118 5b6dd868 Blue Swirl
                  cpu_single_env->eip,
1119 5b6dd868 Blue Swirl
                  cpu_single_env->eip +
1120 5b6dd868 Blue Swirl
                  (intptr_t)cpu_single_env->segs[R_CS].base);
1121 5b6dd868 Blue Swirl
    }
1122 5b6dd868 Blue Swirl
#endif
1123 5b6dd868 Blue Swirl
    p = page_find(start >> TARGET_PAGE_BITS);
1124 5b6dd868 Blue Swirl
    if (!p) {
1125 5b6dd868 Blue Swirl
        return;
1126 5b6dd868 Blue Swirl
    }
1127 5b6dd868 Blue Swirl
    if (p->code_bitmap) {
1128 5b6dd868 Blue Swirl
        offset = start & ~TARGET_PAGE_MASK;
1129 5b6dd868 Blue Swirl
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1130 5b6dd868 Blue Swirl
        if (b & ((1 << len) - 1)) {
1131 5b6dd868 Blue Swirl
            goto do_invalidate;
1132 5b6dd868 Blue Swirl
        }
1133 5b6dd868 Blue Swirl
    } else {
1134 5b6dd868 Blue Swirl
    do_invalidate:
1135 5b6dd868 Blue Swirl
        tb_invalidate_phys_page_range(start, start + len, 1);
1136 5b6dd868 Blue Swirl
    }
1137 5b6dd868 Blue Swirl
}
1138 5b6dd868 Blue Swirl
1139 5b6dd868 Blue Swirl
#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode only).
   'pc' is the host PC of the faulting write (0 if unknown) and 'puc'
   is passed through to cpu_resume_from_signal(). */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    CPUState *cpu = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (env != NULL) {
        cpu = ENV_GET_CPU(env);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits tag which of the TB's pages this list entry is */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
1204 5b6dd868 Blue Swirl
1205 5b6dd868 Blue Swirl
/* add the tb in the target page and protect it if necessary */
/*
 * Link TB 'tb' into the per-page TB list for its n-th page (n is 0 or 1,
 * since a TB can span at most two guest pages) and make sure writes to
 * that page will be detected so the translated code can be invalidated.
 *
 * The low 2 bits of the pointers stored in p->first_tb encode 'n', i.e.
 * which of the TB's two page slots this list entry refers to.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* Push this TB onto the page's singly linked list, tagging the
       pointer with 'n' so the list walker knows which slot to follow. */
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    /* Remember whether the page already held code, *before* we link
       ourselves in: only the first TB on a page triggers protection. */
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

/* NOTE(review): the "|| 1" makes this block unconditional; kept as-is. */
#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* A host page may cover several target pages; clear PAGE_WRITE
           on all of them and accumulate the union of their flags. */
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1263 5b6dd868 Blue Swirl
1264 5b6dd868 Blue Swirl
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
/*
 * Publish a freshly generated TB: insert it into the physical-PC hash
 * table, attach it to the page list(s) of the page(s) it covers, and
 * reset its outgoing jump slots so it initially chains to nothing.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    /* Prepend to the hash bucket's singly linked list. */
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* Circular jump list: "| 2" tags the entry as the list head. */
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    /* 0xffff in tb_next_offset[] means "no direct jump in this slot". */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1306 5b6dd868 Blue Swirl
1307 5b6dd868 Blue Swirl
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
1308 5b6dd868 Blue Swirl
/* check whether the given addr is in TCG generated code buffer or not */
1309 5b6dd868 Blue Swirl
bool is_tcg_gen_code(uintptr_t tc_ptr)
1310 5b6dd868 Blue Swirl
{
1311 52ae646d Yeongkyoon Lee
    /* This can be called during code generation, code_gen_buffer_size
1312 5b6dd868 Blue Swirl
       is used instead of code_gen_ptr for upper boundary checking */
1313 0b0d3320 Evgeny Voevodin
    return (tc_ptr >= (uintptr_t)tcg_ctx.code_gen_buffer &&
1314 0b0d3320 Evgeny Voevodin
            tc_ptr < (uintptr_t)(tcg_ctx.code_gen_buffer +
1315 52ae646d Yeongkyoon Lee
                    tcg_ctx.code_gen_buffer_size));
1316 5b6dd868 Blue Swirl
}
1317 5b6dd868 Blue Swirl
#endif
1318 5b6dd868 Blue Swirl
1319 5b6dd868 Blue Swirl
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
/*
 * Map a host-code address inside the translation buffer back to the TB
 * that contains it.  Relies on tbs[] being ordered by tc_ptr (TBs are
 * allocated linearly from the code buffer), so a binary search for the
 * greatest tc_ptr <= tc_ptr is valid.
 */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    /* Reject addresses outside [buffer start, current generation ptr). */
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    /* No exact match: m_max now indexes the last TB whose start is
       below tc_ptr, i.e. the TB containing the address. */
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
1351 5b6dd868 Blue Swirl
1352 5b6dd868 Blue Swirl
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
1353 5b6dd868 Blue Swirl
/*
 * Invalidate any TBs covering the single byte at physical address
 * 'addr'.  Non-RAM (and non-readable-ROM-device) regions are ignored
 * since they cannot hold translated guest code.
 */
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    /* Translate the section-relative offset into a ram_addr_t before
       invalidating the one-byte range [ram_addr, ram_addr + 1). */
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
1368 5b6dd868 Blue Swirl
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
1369 5b6dd868 Blue Swirl
1370 5b6dd868 Blue Swirl
/*
 * Called when a watchpoint fires inside translated code: locate the TB
 * that contains the faulting host PC, restore the precise guest CPU
 * state from it, and invalidate it so execution re-enters the
 * translator.  Aborts if the host PC does not map to any TB.
 */
void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *hit_tb = tb_find_pc(env->mem_io_pc);

    if (hit_tb == NULL) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(hit_tb, env, env->mem_io_pc);
    tb_phys_invalidate(hit_tb, -1);
}
1382 5b6dd868 Blue Swirl
1383 5b6dd868 Blue Swirl
#ifndef CONFIG_USER_ONLY
1384 5b6dd868 Blue Swirl
/* mask must never be zero, except for A20 change call */
/*
 * TCG implementation of cpu_interrupt(): record the requested interrupt
 * bits and make sure the CPU notices them promptly, either by kicking a
 * remote CPU thread or by forcing the local execution loop to exit.
 */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    CPUArchState *env = cpu->env_ptr;
    int old_mask;

    /* Snapshot the previous request bits before OR-ing in the new ones;
       needed below to detect newly raised interrupts under icount. */
    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        /* Force the instruction counter to expire so the TB stops. */
        env->icount_decr.u16.high = 0xffff;
        /* In deterministic (icount) mode a brand-new interrupt may only
           be raised from within an I/O instruction. */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}
1412 5b6dd868 Blue Swirl
1413 5b6dd868 Blue Swirl
/* Hook through which cpu_interrupt() is delivered; defaults to the TCG
   implementation above.  NOTE(review): presumably replaceable by other
   accelerators -- the overriding code is not visible in this file. */
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1414 5b6dd868 Blue Swirl
1415 5b6dd868 Blue Swirl
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/*
 * Called when an I/O instruction was executed in the middle of a TB
 * under icount.  Regenerates the current TB so that it ends exactly on
 * the I/O instruction (CF_LAST_IO), then restarts execution from the
 * guest state restored at the fault point.  Does not return normally:
 * either cpu_abort()s or longjmps via cpu_resume_from_signal().
 *
 * 'retaddr' is the host return address inside the generated code.
 */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    /* Total instruction budget at the start of this TB. */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    /* New TB: execute exactly n instructions, last one doing I/O. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    /* Replace the old TB with the shorter one. */
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
1474 5b6dd868 Blue Swirl
1475 5b6dd868 Blue Swirl
/*
 * Drop jump-cache entries that could refer to a TB overlapping the
 * flushed page.  A TB may span two pages, so the hash bucket of the
 * preceding page is cleared along with that of 'addr' itself.
 */
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    target_ulong pages[2] = { addr - TARGET_PAGE_SIZE, addr };
    int k;

    for (k = 0; k < 2; k++) {
        unsigned int idx = tb_jmp_cache_hash_page(pages[k]);

        memset(&env->tb_jmp_cache[idx], 0,
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    }
}
1489 5b6dd868 Blue Swirl
1490 5b6dd868 Blue Swirl
/*
 * Print translation-cache statistics to 'f' using 'cpu_fprintf':
 * generated-code size, TB counts, average/max TB sizes, cross-page TB
 * ratio, direct-jump chaining usage, and flush/invalidate counters,
 * followed by the TCG backend's own statistics.
 */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* Single pass over all live TBs to accumulate the statistics. */
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        /* page_addr[1] != -1 means the TB spans two guest pages. */
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        /* 0xffff marks an unused direct-jump slot. */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                             target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
1552 5b6dd868 Blue Swirl
1553 5b6dd868 Blue Swirl
#else /* CONFIG_USER_ONLY */
1554 5b6dd868 Blue Swirl
1555 c3affe56 Andreas Fรคrber
/* User-mode-only cpu_interrupt(): record the pending interrupt bits and
   set tcg_exit_req -- NOTE(review): presumably polled by the TCG
   execution loop so the request is serviced; confirm against cpu-exec. */
void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
1560 5b6dd868 Blue Swirl
1561 5b6dd868 Blue Swirl
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;  /* callback invoked per region */
    void *priv;                 /* opaque argument forwarded to fn */
    uintptr_t start;            /* start of the open region, or -1ul if none */
    int prot;                   /* protection flags of the open region */
};
1571 5b6dd868 Blue Swirl
1572 5b6dd868 Blue Swirl
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1573 5b6dd868 Blue Swirl
                                   abi_ulong end, int new_prot)
1574 5b6dd868 Blue Swirl
{
1575 5b6dd868 Blue Swirl
    if (data->start != -1ul) {
1576 5b6dd868 Blue Swirl
        int rc = data->fn(data->priv, data->start, end, data->prot);
1577 5b6dd868 Blue Swirl
        if (rc != 0) {
1578 5b6dd868 Blue Swirl
            return rc;
1579 5b6dd868 Blue Swirl
        }
1580 5b6dd868 Blue Swirl
    }
1581 5b6dd868 Blue Swirl
1582 5b6dd868 Blue Swirl
    data->start = (new_prot ? end : -1ul);
1583 5b6dd868 Blue Swirl
    data->prot = new_prot;
1584 5b6dd868 Blue Swirl
1585 5b6dd868 Blue Swirl
    return 0;
1586 5b6dd868 Blue Swirl
}
1587 5b6dd868 Blue Swirl
1588 5b6dd868 Blue Swirl
/*
 * Recursive helper for walk_memory_regions(): walk one node of the
 * multi-level page table.  'base' is the guest address covered by this
 * node, 'level' its depth (0 = leaf array of PageDesc), and 'lp' the
 * slot holding the node pointer.  Emits a region boundary whenever the
 * protection flags change.  Returns the first non-zero callback result,
 * or 0.
 */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Unmapped subtree: close any open region at 'base'. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            /* A change in flags ends the current region and may start
               a new one at 'pa'. */
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            /* Each child at this level spans L2_BITS * level extra bits. */
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
1627 5b6dd868 Blue Swirl
1628 5b6dd868 Blue Swirl
/*
 * Walk every mapped region of the guest address space, calling 'fn'
 * once per maximal run of pages sharing the same protection flags.
 * Returns the first non-zero value returned by 'fn', or 0.
 */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data = {
        .fn = fn,
        .priv = priv,
        .start = -1ul,   /* no region open yet */
        .prot = 0,
    };
    uintptr_t i;

    /* Descend from each top-level page-table slot. */
    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any. */
    return walk_memory_regions_end(&data, 0, 0);
}
1649 5b6dd868 Blue Swirl
1650 5b6dd868 Blue Swirl
/*
 * walk_memory_regions() callback: print one region as
 * "start-end size rwx" to the FILE passed via 'priv'.
 * Always returns 0 so the walk continues.
 */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;
    char r = (prot & PAGE_READ) ? 'r' : '-';
    char w = (prot & PAGE_WRITE) ? 'w' : '-';
    char x = (prot & PAGE_EXEC) ? 'x' : '-';

    fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start, r, w, x);

    return 0;
}
1664 5b6dd868 Blue Swirl
1665 5b6dd868 Blue Swirl
/* dump memory mappings */
1666 5b6dd868 Blue Swirl
void page_dump(FILE *f)
1667 5b6dd868 Blue Swirl
{
1668 5b6dd868 Blue Swirl
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
1669 5b6dd868 Blue Swirl
            "start", "end", "size", "prot");
1670 5b6dd868 Blue Swirl
    walk_memory_regions(f, dump_region);
1671 5b6dd868 Blue Swirl
}
1672 5b6dd868 Blue Swirl
1673 5b6dd868 Blue Swirl
/*
 * Return the PAGE_* flags of the page containing guest address
 * 'address', or 0 when the page has no descriptor (unmapped).
 */
int page_get_flags(target_ulong address)
{
    PageDesc *pd = page_find(address >> TARGET_PAGE_BITS);

    return pd ? pd->flags : 0;
}
1683 5b6dd868 Blue Swirl
1684 5b6dd868 Blue Swirl
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    /* PAGE_WRITE_ORG records that the page is logically writable even
       if PAGE_WRITE is later cleared to trap self-modifying code. */
    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    /* Iterate on a remaining-length counter rather than "addr < end" so
       the loop is correct even if the aligned 'end' wrapped to 0. */
    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
1721 5b6dd868 Blue Swirl
1722 5b6dd868 Blue Swirl
/*
 * Check that every page of the guest range [start, start + len) is
 * mapped, valid, and grants all permissions requested in 'flags'
 * (PAGE_READ / PAGE_WRITE).  When PAGE_WRITE is requested, pages that
 * were write-protected only because they contain translated code are
 * made writable again via page_unprotect().
 *
 * Returns 0 on success, -1 on any failure (including address-space
 * wrap-around).
 */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* Bug fix: the previous code returned 0 here, so only the
               FIRST page of the range was ever checked/unprotected when
               PAGE_WRITE was requested.  Fall through and continue with
               the remaining pages instead.  */
        }
    }
    return 0;
}
1777 5b6dd868 Blue Swirl
1778 5b6dd868 Blue Swirl
/* called from signal handler: invalidate the code and unprotect the
1779 5b6dd868 Blue Swirl
   page. Return TRUE if the fault was successfully handled. */
1780 5b6dd868 Blue Swirl
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
1781 5b6dd868 Blue Swirl
{
1782 5b6dd868 Blue Swirl
    unsigned int prot;
1783 5b6dd868 Blue Swirl
    PageDesc *p;
1784 5b6dd868 Blue Swirl
    target_ulong host_start, host_end, addr;
1785 5b6dd868 Blue Swirl
1786 5b6dd868 Blue Swirl
    /* Technically this isn't safe inside a signal handler.  However we
1787 5b6dd868 Blue Swirl
       know this only ever happens in a synchronous SEGV handler, so in
1788 5b6dd868 Blue Swirl
       practice it seems to be ok.  */
1789 5b6dd868 Blue Swirl
    mmap_lock();
1790 5b6dd868 Blue Swirl
1791 5b6dd868 Blue Swirl
    p = page_find(address >> TARGET_PAGE_BITS);
1792 5b6dd868 Blue Swirl
    if (!p) {
1793 5b6dd868 Blue Swirl
        mmap_unlock();
1794 5b6dd868 Blue Swirl
        return 0;
1795 5b6dd868 Blue Swirl
    }
1796 5b6dd868 Blue Swirl
1797 5b6dd868 Blue Swirl
    /* if the page was really writable, then we change its
1798 5b6dd868 Blue Swirl
       protection back to writable */
1799 5b6dd868 Blue Swirl
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
1800 5b6dd868 Blue Swirl
        host_start = address & qemu_host_page_mask;
1801 5b6dd868 Blue Swirl
        host_end = host_start + qemu_host_page_size;
1802 5b6dd868 Blue Swirl
1803 5b6dd868 Blue Swirl
        prot = 0;
1804 5b6dd868 Blue Swirl
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
1805 5b6dd868 Blue Swirl
            p = page_find(addr >> TARGET_PAGE_BITS);
1806 5b6dd868 Blue Swirl
            p->flags |= PAGE_WRITE;
1807 5b6dd868 Blue Swirl
            prot |= p->flags;
1808 5b6dd868 Blue Swirl
1809 5b6dd868 Blue Swirl
            /* and since the content will be modified, we must invalidate
1810 5b6dd868 Blue Swirl
               the corresponding translated code. */
1811 5b6dd868 Blue Swirl
            tb_invalidate_phys_page(addr, pc, puc);
1812 5b6dd868 Blue Swirl
#ifdef DEBUG_TB_CHECK
1813 5b6dd868 Blue Swirl
            tb_invalidate_check(addr);
1814 5b6dd868 Blue Swirl
#endif
1815 5b6dd868 Blue Swirl
        }
1816 5b6dd868 Blue Swirl
        mprotect((void *)g2h(host_start), qemu_host_page_size,
1817 5b6dd868 Blue Swirl
                 prot & PAGE_BITS);
1818 5b6dd868 Blue Swirl
1819 5b6dd868 Blue Swirl
        mmap_unlock();
1820 5b6dd868 Blue Swirl
        return 1;
1821 5b6dd868 Blue Swirl
    }
1822 5b6dd868 Blue Swirl
    mmap_unlock();
1823 5b6dd868 Blue Swirl
    return 0;
1824 5b6dd868 Blue Swirl
}
1825 5b6dd868 Blue Swirl
#endif /* CONFIG_USER_ONLY */