/* exec.c @ 520860ef */

/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

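/* Make the [addr, addr + size) range of host memory executable.  On
   Win32 this uses VirtualProtect(); elsewhere the range is rounded
   out to host page boundaries and mprotect()ed read/write/exec. */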
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

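/* Determine the host page size, derive the qemu_host_page_* globals
   from it and allocate the physical page descriptor table.  In user
   mode, pages already mapped by the host (from /proc/self/maps) are
   marked PAGE_RESERVED so the guest will not reuse them. */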
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

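/* Virtual page descriptors are kept in a two-level table: the top
   L1_BITS of the page index select an entry in l1_map[], which points
   to an array of L2_SIZE PageDesc entries indexed by the low L2_BITS.
   The helpers below return the L1 slot, or a PageDesc with (or
   without) allocating a missing L2 array on the way. */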
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

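/* Physical page descriptors (PhysPageDesc) are looked up the same way
   from l1_phys_map, with one extra indirection level when the physical
   address space is wider than 32 bits.  With alloc != 0, missing
   intermediate tables and descriptor arrays are created, and new
   entries are initialized to IO_MEM_UNASSIGNED. */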
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

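/* Allocate the buffer that will hold the translated (host) code.
   Either the static buffer is used, or an executable mapping is
   created with mmap()/qemu_malloc(); on several hosts the buffer must
   sit in the low part of the address space so that generated direct
   calls and branches can reach it. */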
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

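/* Save/load of the CPUState fields that are common to all targets
   (halted and interrupt_request); registered per CPU from
   cpu_exec_init() when snapshot support is built in. */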
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}
#endif

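/* Register a new virtual CPU: append env to the global first_cpu list,
   assign it the next cpu_index, initialize its breakpoint/watchpoint
   lists and register the savevm handlers for its state. */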
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
562 9fa3e853 bellard
{
563 9fa3e853 bellard
    if (p->code_bitmap) {
564 59817ccb bellard
        qemu_free(p->code_bitmap);
565 9fa3e853 bellard
        p->code_bitmap = NULL;
566 9fa3e853 bellard
    }
567 9fa3e853 bellard
    p->code_write_count = 0;
568 9fa3e853 bellard
}
569 9fa3e853 bellard
570 fd6ce8f6 bellard
/* set to NULL all the 'first_tb' fields in all PageDescs */
571 fd6ce8f6 bellard
static void page_flush_tb(void)
572 fd6ce8f6 bellard
{
573 fd6ce8f6 bellard
    int i, j;
574 fd6ce8f6 bellard
    PageDesc *p;
575 fd6ce8f6 bellard
576 fd6ce8f6 bellard
    for(i = 0; i < L1_SIZE; i++) {
577 fd6ce8f6 bellard
        p = l1_map[i];
578 fd6ce8f6 bellard
        if (p) {
579 9fa3e853 bellard
            for(j = 0; j < L2_SIZE; j++) {
580 9fa3e853 bellard
                p->first_tb = NULL;
581 9fa3e853 bellard
                invalidate_page_bitmap(p);
582 9fa3e853 bellard
                p++;
583 9fa3e853 bellard
            }
584 fd6ce8f6 bellard
        }
585 fd6ce8f6 bellard
    }
586 fd6ce8f6 bellard
}
587 fd6ce8f6 bellard
588 fd6ce8f6 bellard
/* flush all the translation blocks */
589 d4e8164f bellard
/* XXX: tb_flush is currently not thread safe */
590 6a00d601 bellard
void tb_flush(CPUState *env1)
591 fd6ce8f6 bellard
{
592 6a00d601 bellard
    CPUState *env;
593 0124311e bellard
#if defined(DEBUG_FLUSH)
594 ab3d1727 blueswir1
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
595 ab3d1727 blueswir1
           (unsigned long)(code_gen_ptr - code_gen_buffer),
596 ab3d1727 blueswir1
           nb_tbs, nb_tbs > 0 ?
597 ab3d1727 blueswir1
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
598 fd6ce8f6 bellard
#endif
599 26a5f13b bellard
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
600 a208e54a pbrook
        cpu_abort(env1, "Internal error: code buffer overflow\n");
601 a208e54a pbrook
602 fd6ce8f6 bellard
    nb_tbs = 0;
603 3b46e624 ths
604 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
605 6a00d601 bellard
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
606 6a00d601 bellard
    }
607 9fa3e853 bellard
608 8a8a608f bellard
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
609 fd6ce8f6 bellard
    page_flush_tb();
610 9fa3e853 bellard
611 fd6ce8f6 bellard
    code_gen_ptr = code_gen_buffer;
612 d4e8164f bellard
    /* XXX: flush processor icache at this point if cache flush is
613 d4e8164f bellard
       expensive */
614 e3db7226 bellard
    tb_flush_count++;
615 fd6ce8f6 bellard
}
616 fd6ce8f6 bellard
617 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
618 fd6ce8f6 bellard
619 bc98a7ef j_mayer
static void tb_invalidate_check(target_ulong address)
620 fd6ce8f6 bellard
{
621 fd6ce8f6 bellard
    TranslationBlock *tb;
622 fd6ce8f6 bellard
    int i;
623 fd6ce8f6 bellard
    address &= TARGET_PAGE_MASK;
624 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
625 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
626 fd6ce8f6 bellard
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
627 fd6ce8f6 bellard
                  address >= tb->pc + tb->size)) {
628 fd6ce8f6 bellard
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
629 99773bd4 pbrook
                       address, (long)tb->pc, tb->size);
630 fd6ce8f6 bellard
            }
631 fd6ce8f6 bellard
        }
632 fd6ce8f6 bellard
    }
633 fd6ce8f6 bellard
}
634 fd6ce8f6 bellard
635 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
636 fd6ce8f6 bellard
static void tb_page_check(void)
637 fd6ce8f6 bellard
{
638 fd6ce8f6 bellard
    TranslationBlock *tb;
639 fd6ce8f6 bellard
    int i, flags1, flags2;
640 3b46e624 ths
641 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
642 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
643 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
644 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
645 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
646 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
647 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
648 fd6ce8f6 bellard
            }
649 fd6ce8f6 bellard
        }
650 fd6ce8f6 bellard
    }
651 fd6ce8f6 bellard
}
652 fd6ce8f6 bellard
653 bdaf78e0 blueswir1
static void tb_jmp_check(TranslationBlock *tb)
654 d4e8164f bellard
{
655 d4e8164f bellard
    TranslationBlock *tb1;
656 d4e8164f bellard
    unsigned int n1;
657 d4e8164f bellard
658 d4e8164f bellard
    /* suppress any remaining jumps to this TB */
659 d4e8164f bellard
    tb1 = tb->jmp_first;
660 d4e8164f bellard
    for(;;) {
661 d4e8164f bellard
        n1 = (long)tb1 & 3;
662 d4e8164f bellard
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
663 d4e8164f bellard
        if (n1 == 2)
664 d4e8164f bellard
            break;
665 d4e8164f bellard
        tb1 = tb1->jmp_next[n1];
666 d4e8164f bellard
    }
667 d4e8164f bellard
    /* check end of list */
668 d4e8164f bellard
    if (tb1 != tb) {
669 d4e8164f bellard
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
670 d4e8164f bellard
    }
671 d4e8164f bellard
}
672 d4e8164f bellard
673 fd6ce8f6 bellard
#endif
674 fd6ce8f6 bellard
675 fd6ce8f6 bellard
/* invalidate one TB */
676 fd6ce8f6 bellard
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
677 fd6ce8f6 bellard
                             int next_offset)
678 fd6ce8f6 bellard
{
679 fd6ce8f6 bellard
    TranslationBlock *tb1;
680 fd6ce8f6 bellard
    for(;;) {
681 fd6ce8f6 bellard
        tb1 = *ptb;
682 fd6ce8f6 bellard
        if (tb1 == tb) {
683 fd6ce8f6 bellard
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
684 fd6ce8f6 bellard
            break;
685 fd6ce8f6 bellard
        }
686 fd6ce8f6 bellard
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
687 fd6ce8f6 bellard
    }
688 fd6ce8f6 bellard
}
689 fd6ce8f6 bellard
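/* The TB lists hanging off PageDesc.first_tb/page_next[] store, in the
   low 2 bits of each pointer, which of the (up to) two pages of the TB
   the link belongs to; the pointer itself is recovered by masking
   those bits off.  tb_page_remove() unlinks a TB from such a list. */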
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

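/* TBs that directly jump to a given TB are kept on a circular list
   (jmp_first/jmp_next[]), again with the jump slot number encoded in
   the low bits and the end of the list marked by a pointer tagged
   with 2.  tb_jmp_remove() takes the TB out of the list for slot n. */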
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

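/* Remove a TB from every structure that references it: the physical
   hash table, the per-page TB lists, the per-CPU tb_jmp_cache and the
   jump lists of any TB chained to it (those jumps are reset so they
   fall back to the exit path). */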
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

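/* Set bits [start, start + len) in the bitmap 'tab' (used to mark the
   bytes of a page that are covered by translated code). */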
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

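/* Build the code bitmap of a page from its TB list, so that writes to
   bytes that contain no translated code can skip the invalidation. */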
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
854 2e70f6ef pbrook
                              target_ulong pc, target_ulong cs_base,
855 2e70f6ef pbrook
                              int flags, int cflags)
856 d720b93d bellard
{
857 d720b93d bellard
    TranslationBlock *tb;
858 d720b93d bellard
    uint8_t *tc_ptr;
859 d720b93d bellard
    target_ulong phys_pc, phys_page2, virt_page2;
860 d720b93d bellard
    int code_gen_size;
861 d720b93d bellard
862 c27004ec bellard
    phys_pc = get_phys_addr_code(env, pc);
863 c27004ec bellard
    tb = tb_alloc(pc);
864 d720b93d bellard
    if (!tb) {
865 d720b93d bellard
        /* flush must be done */
866 d720b93d bellard
        tb_flush(env);
867 d720b93d bellard
        /* cannot fail at this point */
868 c27004ec bellard
        tb = tb_alloc(pc);
869 2e70f6ef pbrook
        /* Don't forget to invalidate previous TB info.  */
870 2e70f6ef pbrook
        tb_invalidated_flag = 1;
871 d720b93d bellard
    }
872 d720b93d bellard
    tc_ptr = code_gen_ptr;
873 d720b93d bellard
    tb->tc_ptr = tc_ptr;
874 d720b93d bellard
    tb->cs_base = cs_base;
875 d720b93d bellard
    tb->flags = flags;
876 d720b93d bellard
    tb->cflags = cflags;
877 d07bde88 blueswir1
    cpu_gen_code(env, tb, &code_gen_size);
878 d720b93d bellard
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
879 3b46e624 ths
880 d720b93d bellard
    /* check next page if needed */
881 c27004ec bellard
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
882 d720b93d bellard
    phys_page2 = -1;
883 c27004ec bellard
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
884 d720b93d bellard
        phys_page2 = get_phys_addr_code(env, virt_page2);
885 d720b93d bellard
    }
886 d720b93d bellard
    tb_link_phys(tb, phys_pc, phys_page2);
887 2e70f6ef pbrook
    return tb;
888 d720b93d bellard
}
889 3b46e624 ths
890 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
891 9fa3e853 bellard
   starting in range [start;end[. NOTE: start and end must refer to
892 d720b93d bellard
   the same physical page. 'is_cpu_write_access' should be true if called
893 d720b93d bellard
   from a real cpu write access: the virtual CPU will exit the current
894 d720b93d bellard
   TB if code is modified inside this TB. */
895 00f82b8a aurel32
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
896 d720b93d bellard
                                   int is_cpu_write_access)
897 d720b93d bellard
{
898 6b917547 aliguori
    TranslationBlock *tb, *tb_next, *saved_tb;
899 d720b93d bellard
    CPUState *env = cpu_single_env;
900 9fa3e853 bellard
    target_ulong tb_start, tb_end;
901 6b917547 aliguori
    PageDesc *p;
902 6b917547 aliguori
    int n;
903 6b917547 aliguori
#ifdef TARGET_HAS_PRECISE_SMC
904 6b917547 aliguori
    int current_tb_not_found = is_cpu_write_access;
905 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
906 6b917547 aliguori
    int current_tb_modified = 0;
907 6b917547 aliguori
    target_ulong current_pc = 0;
908 6b917547 aliguori
    target_ulong current_cs_base = 0;
909 6b917547 aliguori
    int current_flags = 0;
910 6b917547 aliguori
#endif /* TARGET_HAS_PRECISE_SMC */
911 9fa3e853 bellard
912 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
913 5fafdf24 ths
    if (!p)
914 9fa3e853 bellard
        return;
915 5fafdf24 ths
    if (!p->code_bitmap &&
916 d720b93d bellard
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
917 d720b93d bellard
        is_cpu_write_access) {
918 9fa3e853 bellard
        /* build code bitmap */
919 9fa3e853 bellard
        build_page_bitmap(p);
920 9fa3e853 bellard
    }
921 9fa3e853 bellard
922 9fa3e853 bellard
    /* we remove all the TBs in the range [start, end[ */
923 9fa3e853 bellard
    /* XXX: see if in some cases it could be faster to invalidate all the code */
924 9fa3e853 bellard
    tb = p->first_tb;
925 9fa3e853 bellard
    while (tb != NULL) {
926 9fa3e853 bellard
        n = (long)tb & 3;
927 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
928 9fa3e853 bellard
        tb_next = tb->page_next[n];
929 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
930 9fa3e853 bellard
        if (n == 0) {
931 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
932 9fa3e853 bellard
               it is not a problem */
933 9fa3e853 bellard
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
934 9fa3e853 bellard
            tb_end = tb_start + tb->size;
935 9fa3e853 bellard
        } else {
936 9fa3e853 bellard
            tb_start = tb->page_addr[1];
937 9fa3e853 bellard
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
938 9fa3e853 bellard
        }
939 9fa3e853 bellard
        if (!(tb_end <= start || tb_start >= end)) {
940 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
941 d720b93d bellard
            if (current_tb_not_found) {
942 d720b93d bellard
                current_tb_not_found = 0;
943 d720b93d bellard
                current_tb = NULL;
944 2e70f6ef pbrook
                if (env->mem_io_pc) {
945 d720b93d bellard
                    /* now we have a real cpu fault */
946 2e70f6ef pbrook
                    current_tb = tb_find_pc(env->mem_io_pc);
947 d720b93d bellard
                }
948 d720b93d bellard
            }
949 d720b93d bellard
            if (current_tb == tb &&
950 2e70f6ef pbrook
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
951 d720b93d bellard
                /* If we are modifying the current TB, we must stop
952 d720b93d bellard
                its execution. We could be more precise by checking
953 d720b93d bellard
                that the modification is after the current PC, but it
954 d720b93d bellard
                would require a specialized function to partially
955 d720b93d bellard
                restore the CPU state */
956 3b46e624 ths
957 d720b93d bellard
                current_tb_modified = 1;
958 5fafdf24 ths
                cpu_restore_state(current_tb, env,
959 2e70f6ef pbrook
                                  env->mem_io_pc, NULL);
960 6b917547 aliguori
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
961 6b917547 aliguori
                                     &current_flags);
962 d720b93d bellard
            }
963 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
964 6f5a9f7e bellard
            /* we need to do that to handle the case where a signal
965 6f5a9f7e bellard
               occurs while doing tb_phys_invalidate() */
966 6f5a9f7e bellard
            saved_tb = NULL;
967 6f5a9f7e bellard
            if (env) {
968 6f5a9f7e bellard
                saved_tb = env->current_tb;
969 6f5a9f7e bellard
                env->current_tb = NULL;
970 6f5a9f7e bellard
            }
971 9fa3e853 bellard
            tb_phys_invalidate(tb, -1);
972 6f5a9f7e bellard
            if (env) {
973 6f5a9f7e bellard
                env->current_tb = saved_tb;
974 6f5a9f7e bellard
                if (env->interrupt_request && env->current_tb)
975 6f5a9f7e bellard
                    cpu_interrupt(env, env->interrupt_request);
976 6f5a9f7e bellard
            }
977 9fa3e853 bellard
        }
978 9fa3e853 bellard
        tb = tb_next;
979 9fa3e853 bellard
    }
980 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
981 9fa3e853 bellard
    /* if no code remaining, no need to continue to use slow writes */
982 9fa3e853 bellard
    if (!p->first_tb) {
983 9fa3e853 bellard
        invalidate_page_bitmap(p);
984 d720b93d bellard
        if (is_cpu_write_access) {
985 2e70f6ef pbrook
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
986 d720b93d bellard
        }
987 d720b93d bellard
    }
988 d720b93d bellard
#endif
989 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
990 d720b93d bellard
    if (current_tb_modified) {
991 d720b93d bellard
        /* we generate a block containing just the instruction
992 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
993 d720b93d bellard
           itself */
994 ea1c1802 bellard
        env->current_tb = NULL;
995 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
996 d720b93d bellard
        cpu_resume_from_signal(env, NULL);
997 9fa3e853 bellard
    }
998 fd6ce8f6 bellard
#endif
999 9fa3e853 bellard
}
1000 fd6ce8f6 bellard
1001 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1002 00f82b8a aurel32
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1003 9fa3e853 bellard
{
1004 9fa3e853 bellard
    PageDesc *p;
1005 9fa3e853 bellard
    int offset, b;
1006 59817ccb bellard
#if 0
1007 a4193c8a bellard
    if (1) {
1008 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1009 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1010 93fcfe39 aliguori
                  cpu_single_env->eip,
1011 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1012 59817ccb bellard
    }
1013 59817ccb bellard
#endif
1014 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1015 5fafdf24 ths
    if (!p)
1016 9fa3e853 bellard
        return;
1017 9fa3e853 bellard
    if (p->code_bitmap) {
1018 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1019 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1020 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1021 9fa3e853 bellard
            goto do_invalidate;
1022 9fa3e853 bellard
    } else {
1023 9fa3e853 bellard
    do_invalidate:
1024 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1025 9fa3e853 bellard
    }
1026 9fa3e853 bellard
}
1027 9fa3e853 bellard
1028 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
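/* User-mode variant: invalidate every TB on the page containing addr.
   'pc'/'puc' describe the faulting host context so that, with precise
   SMC, the current TB can be retranslated and execution resumed. */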
1029 00f82b8a aurel32
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1030 d720b93d bellard
                                    unsigned long pc, void *puc)
1031 9fa3e853 bellard
{
1032 6b917547 aliguori
    TranslationBlock *tb;
1033 9fa3e853 bellard
    PageDesc *p;
1034 6b917547 aliguori
    int n;
1035 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1036 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1037 d720b93d bellard
    CPUState *env = cpu_single_env;
1038 6b917547 aliguori
    int current_tb_modified = 0;
1039 6b917547 aliguori
    target_ulong current_pc = 0;
1040 6b917547 aliguori
    target_ulong current_cs_base = 0;
1041 6b917547 aliguori
    int current_flags = 0;
1042 d720b93d bellard
#endif
1043 9fa3e853 bellard
1044 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1045 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1046 5fafdf24 ths
    if (!p)
1047 9fa3e853 bellard
        return;
1048 9fa3e853 bellard
    tb = p->first_tb;
1049 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1050 d720b93d bellard
    if (tb && pc != 0) {
1051 d720b93d bellard
        current_tb = tb_find_pc(pc);
1052 d720b93d bellard
    }
1053 d720b93d bellard
#endif
1054 9fa3e853 bellard
    while (tb != NULL) {
1055 9fa3e853 bellard
        n = (long)tb & 3;
1056 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1057 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1058 d720b93d bellard
        if (current_tb == tb &&
1059 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1060 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1061 d720b93d bellard
                   its execution. We could be more precise by checking
1062 d720b93d bellard
                   that the modification is after the current PC, but it
1063 d720b93d bellard
                   would require a specialized function to partially
1064 d720b93d bellard
                   restore the CPU state */
1065 3b46e624 ths
1066 d720b93d bellard
            current_tb_modified = 1;
1067 d720b93d bellard
            cpu_restore_state(current_tb, env, pc, puc);
1068 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1069 6b917547 aliguori
                                 &current_flags);
1070 d720b93d bellard
        }
1071 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1072 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1073 9fa3e853 bellard
        tb = tb->page_next[n];
1074 9fa3e853 bellard
    }
1075 fd6ce8f6 bellard
    p->first_tb = NULL;
1076 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1077 d720b93d bellard
    if (current_tb_modified) {
1078 d720b93d bellard
        /* we generate a block containing just the instruction
1079 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1080 d720b93d bellard
           itself */
1081 ea1c1802 bellard
        env->current_tb = NULL;
1082 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1083 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1084 d720b93d bellard
    }
1085 d720b93d bellard
#endif
1086 fd6ce8f6 bellard
}
1087 9fa3e853 bellard
#endif
1088 fd6ce8f6 bellard
1089 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
1090 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1091 53a5960a pbrook
                                 unsigned int n, target_ulong page_addr)
1092 fd6ce8f6 bellard
{
1093 fd6ce8f6 bellard
    PageDesc *p;
1094 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1095 9fa3e853 bellard
1096 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1097 3a7d929e bellard
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1098 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1099 9fa3e853 bellard
    last_first_tb = p->first_tb;
1100 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1101 9fa3e853 bellard
    invalidate_page_bitmap(p);
1102 fd6ce8f6 bellard
1103 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1104 d720b93d bellard
1105 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1106 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1107 53a5960a pbrook
        target_ulong addr;
1108 53a5960a pbrook
        PageDesc *p2;
1109 9fa3e853 bellard
        int prot;
1110 9fa3e853 bellard
1111 fd6ce8f6 bellard
        /* force the host page as non-writable (writes will have a
1112 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1113 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1114 fd6ce8f6 bellard
        prot = 0;
1115 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1116 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1117 53a5960a pbrook
1118 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1119 53a5960a pbrook
            if (!p2)
1120 53a5960a pbrook
                continue;
1121 53a5960a pbrook
            prot |= p2->flags;
1122 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1123 53a5960a pbrook
            page_get_flags(addr);
1124 53a5960a pbrook
        }
1125 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1126 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1127 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1128 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1129 53a5960a pbrook
               page_addr);
1130 fd6ce8f6 bellard
#endif
1131 fd6ce8f6 bellard
    }
1132 9fa3e853 bellard
#else
1133 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1134 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1135 9fa3e853 bellard
       allocated in a physical page */
1136 9fa3e853 bellard
    if (!last_first_tb) {
1137 6a00d601 bellard
        tlb_protect_code(page_addr);
1138 9fa3e853 bellard
    }
1139 9fa3e853 bellard
#endif
1140 d720b93d bellard
1141 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1142 fd6ce8f6 bellard
}
1143 fd6ce8f6 bellard
1144 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1145 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1146 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1147 fd6ce8f6 bellard
{
1148 fd6ce8f6 bellard
    TranslationBlock *tb;
1149 fd6ce8f6 bellard
1150 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1151 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1152 d4e8164f bellard
        return NULL;
1153 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1154 fd6ce8f6 bellard
    tb->pc = pc;
1155 b448f2f3 bellard
    tb->cflags = 0;
1156 d4e8164f bellard
    return tb;
1157 d4e8164f bellard
}
1158 d4e8164f bellard
1159 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
1160 2e70f6ef pbrook
{
1161 bf20dc07 ths
    /* In practice this is mostly used for single-use temporary TBs.
1162 2e70f6ef pbrook
       Ignore the hard cases and just back up if this TB happens to
1163 2e70f6ef pbrook
       be the last one generated.  */
1164 2e70f6ef pbrook
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1165 2e70f6ef pbrook
        code_gen_ptr = tb->tc_ptr;
1166 2e70f6ef pbrook
        nb_tbs--;
1167 2e70f6ef pbrook
    }
1168 2e70f6ef pbrook
}
1169 2e70f6ef pbrook
1170 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1171 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1172 5fafdf24 ths
void tb_link_phys(TranslationBlock *tb,
1173 9fa3e853 bellard
                  target_ulong phys_pc, target_ulong phys_page2)
1174 d4e8164f bellard
{
1175 9fa3e853 bellard
    unsigned int h;
1176 9fa3e853 bellard
    TranslationBlock **ptb;
1177 9fa3e853 bellard
1178 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1179 c8a706fe pbrook
       before we are done.  */
1180 c8a706fe pbrook
    mmap_lock();
1181 9fa3e853 bellard
    /* add in the physical hash table */
1182 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1183 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1184 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1185 9fa3e853 bellard
    *ptb = tb;
1186 fd6ce8f6 bellard
1187 fd6ce8f6 bellard
    /* add in the page list */
1188 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1189 9fa3e853 bellard
    if (phys_page2 != -1)
1190 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1191 9fa3e853 bellard
    else
1192 9fa3e853 bellard
        tb->page_addr[1] = -1;
1193 9fa3e853 bellard
1194 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1195 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1196 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1197 d4e8164f bellard
1198 d4e8164f bellard
    /* init original jump addresses */
1199 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1200 d4e8164f bellard
        tb_reset_jump(tb, 0);
1201 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1202 d4e8164f bellard
        tb_reset_jump(tb, 1);
1203 8a40a180 bellard
1204 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1205 8a40a180 bellard
    tb_page_check();
1206 8a40a180 bellard
#endif
1207 c8a706fe pbrook
    mmap_unlock();
1208 fd6ce8f6 bellard
}
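
/* Illustrative sketch (not part of the original file): a condensed view of
   how a caller such as tb_gen_code() is expected to drive tb_alloc() and
   tb_link_phys().  The code generation step and error handling are elided;
   the point is that phys_page2 stays -1 unless the translated guest code
   crosses a target page boundary. */
#if 0
static TranslationBlock *example_translate(CPUState *env, target_ulong pc,
                                           target_ulong phys_pc)
{
    TranslationBlock *tb;
    target_ulong virt_page2, phys_page2;

    tb = tb_alloc(pc);
    if (!tb)
        return NULL;            /* caller would flush the TB cache and retry */
    /* ... fill in tb->tc_ptr/cs_base/flags and run the code generator ... */
    phys_page2 = -1;
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills into a second page: record that page as well */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
#endif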
1209 fd6ce8f6 bellard
1210 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1211 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1212 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1213 fd6ce8f6 bellard
{
1214 9fa3e853 bellard
    int m_min, m_max, m;
1215 9fa3e853 bellard
    unsigned long v;
1216 9fa3e853 bellard
    TranslationBlock *tb;
1217 a513fe19 bellard
1218 a513fe19 bellard
    if (nb_tbs <= 0)
1219 a513fe19 bellard
        return NULL;
1220 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1221 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1222 a513fe19 bellard
        return NULL;
1223 a513fe19 bellard
    /* binary search (cf Knuth) */
1224 a513fe19 bellard
    m_min = 0;
1225 a513fe19 bellard
    m_max = nb_tbs - 1;
1226 a513fe19 bellard
    while (m_min <= m_max) {
1227 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1228 a513fe19 bellard
        tb = &tbs[m];
1229 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1230 a513fe19 bellard
        if (v == tc_ptr)
1231 a513fe19 bellard
            return tb;
1232 a513fe19 bellard
        else if (tc_ptr < v) {
1233 a513fe19 bellard
            m_max = m - 1;
1234 a513fe19 bellard
        } else {
1235 a513fe19 bellard
            m_min = m + 1;
1236 a513fe19 bellard
        }
1237 5fafdf24 ths
    }
1238 a513fe19 bellard
    return &tbs[m_max];
1239 a513fe19 bellard
}
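
/* Worked example (illustrative, not part of the original file): if the
   generated-code pointers are tbs[0].tc_ptr = 0x1000, tbs[1].tc_ptr = 0x1400
   and tbs[2].tc_ptr = 0x1900, then tb_find_pc(0x1450) returns &tbs[1], the
   last TB whose code starts at or before tc_ptr -- i.e. the TB whose
   generated code contains that host address, assuming the blocks are laid
   out back to back in code_gen_buffer. */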
1240 7501267e bellard
1241 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1242 ea041c0e bellard
1243 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1244 ea041c0e bellard
{
1245 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1246 ea041c0e bellard
    unsigned int n1;
1247 ea041c0e bellard
1248 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1249 ea041c0e bellard
    if (tb1 != NULL) {
1250 ea041c0e bellard
        /* find head of list */
1251 ea041c0e bellard
        for(;;) {
1252 ea041c0e bellard
            n1 = (long)tb1 & 3;
1253 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1254 ea041c0e bellard
            if (n1 == 2)
1255 ea041c0e bellard
                break;
1256 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1257 ea041c0e bellard
        }
1258 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1259 ea041c0e bellard
        tb_next = tb1;
1260 ea041c0e bellard
1261 ea041c0e bellard
        /* remove tb from the jmp_first list */
1262 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1263 ea041c0e bellard
        for(;;) {
1264 ea041c0e bellard
            tb1 = *ptb;
1265 ea041c0e bellard
            n1 = (long)tb1 & 3;
1266 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1267 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1268 ea041c0e bellard
                break;
1269 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1270 ea041c0e bellard
        }
1271 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1272 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1273 3b46e624 ths
1274 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1275 ea041c0e bellard
        tb_reset_jump(tb, n);
1276 ea041c0e bellard
1277 0124311e bellard
        /* suppress jumps in the tb to which we could have jumped */
1278 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1279 ea041c0e bellard
    }
1280 ea041c0e bellard
}
1281 ea041c0e bellard
1282 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1283 ea041c0e bellard
{
1284 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1285 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1286 ea041c0e bellard
}
1287 ea041c0e bellard
1288 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1289 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1290 d720b93d bellard
{
1291 9b3c35e0 j_mayer
    target_phys_addr_t addr;
1292 9b3c35e0 j_mayer
    target_ulong pd;
1293 c2f07f81 pbrook
    ram_addr_t ram_addr;
1294 c2f07f81 pbrook
    PhysPageDesc *p;
1295 d720b93d bellard
1296 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1297 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1298 c2f07f81 pbrook
    if (!p) {
1299 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1300 c2f07f81 pbrook
    } else {
1301 c2f07f81 pbrook
        pd = p->phys_offset;
1302 c2f07f81 pbrook
    }
1303 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1304 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1305 d720b93d bellard
}
1306 c27004ec bellard
#endif
1307 d720b93d bellard
1308 6658ffb8 pbrook
/* Add a watchpoint.  */
1309 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1310 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1311 6658ffb8 pbrook
{
1312 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1313 c0ce998e aliguori
    CPUWatchpoint *wp;
1314 6658ffb8 pbrook
1315 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1316 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1317 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1318 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1319 b4051334 aliguori
        return -EINVAL;
1320 b4051334 aliguori
    }
1321 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1322 a1d1bb31 aliguori
1323 a1d1bb31 aliguori
    wp->vaddr = addr;
1324 b4051334 aliguori
    wp->len_mask = len_mask;
1325 a1d1bb31 aliguori
    wp->flags = flags;
1326 a1d1bb31 aliguori
1327 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1328 c0ce998e aliguori
    if (flags & BP_GDB)
1329 c0ce998e aliguori
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1330 c0ce998e aliguori
    else
1331 c0ce998e aliguori
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1332 6658ffb8 pbrook
1333 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1334 a1d1bb31 aliguori
1335 a1d1bb31 aliguori
    if (watchpoint)
1336 a1d1bb31 aliguori
        *watchpoint = wp;
1337 a1d1bb31 aliguori
    return 0;
1338 6658ffb8 pbrook
}
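
/* Illustrative sketch (not part of the original file): how a hypothetical
   caller might install and later drop a 4-byte write watchpoint.  With
   len = 4 the len_mask above is ~(4 - 1) = ~3, so the sanity check rejects
   any address whose low two bits are set (not aligned to the length).
   BP_GDB appears in this file; BP_MEM_WRITE is assumed to be the matching
   write-access flag from the same break/watchpoint API. */
#if 0
static void example_watch_word(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0) {
        return;                 /* bad length or unaligned address */
    }
    /* ... resume the guest; the watchpoint handling code is expected to
       mark wp->flags with BP_WATCHPOINT_HIT when it fires ... */
    cpu_watchpoint_remove_by_ref(env, wp);
}
#endif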
1339 6658ffb8 pbrook
1340 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1341 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1342 a1d1bb31 aliguori
                          int flags)
1343 6658ffb8 pbrook
{
1344 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1345 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1346 6658ffb8 pbrook
1347 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1348 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1349 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1350 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1351 6658ffb8 pbrook
            return 0;
1352 6658ffb8 pbrook
        }
1353 6658ffb8 pbrook
    }
1354 a1d1bb31 aliguori
    return -ENOENT;
1355 6658ffb8 pbrook
}
1356 6658ffb8 pbrook
1357 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1358 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1359 a1d1bb31 aliguori
{
1360 c0ce998e aliguori
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1361 7d03f82f edgar_igl
1362 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1363 a1d1bb31 aliguori
1364 a1d1bb31 aliguori
    qemu_free(watchpoint);
1365 a1d1bb31 aliguori
}
1366 a1d1bb31 aliguori
1367 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1368 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1369 a1d1bb31 aliguori
{
1370 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1371 a1d1bb31 aliguori
1372 c0ce998e aliguori
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1373 a1d1bb31 aliguori
        if (wp->flags & mask)
1374 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1375 c0ce998e aliguori
    }
1376 7d03f82f edgar_igl
}
1377 7d03f82f edgar_igl
1378 a1d1bb31 aliguori
/* Add a breakpoint.  */
1379 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1380 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1381 4c3a88a2 bellard
{
1382 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1383 c0ce998e aliguori
    CPUBreakpoint *bp;
1384 3b46e624 ths
1385 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1386 4c3a88a2 bellard
1387 a1d1bb31 aliguori
    bp->pc = pc;
1388 a1d1bb31 aliguori
    bp->flags = flags;
1389 a1d1bb31 aliguori
1390 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1391 c0ce998e aliguori
    if (flags & BP_GDB)
1392 c0ce998e aliguori
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1393 c0ce998e aliguori
    else
1394 c0ce998e aliguori
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1395 3b46e624 ths
1396 d720b93d bellard
    breakpoint_invalidate(env, pc);
1397 a1d1bb31 aliguori
1398 a1d1bb31 aliguori
    if (breakpoint)
1399 a1d1bb31 aliguori
        *breakpoint = bp;
1400 4c3a88a2 bellard
    return 0;
1401 4c3a88a2 bellard
#else
1402 a1d1bb31 aliguori
    return -ENOSYS;
1403 4c3a88a2 bellard
#endif
1404 4c3a88a2 bellard
}
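
/* Illustrative sketch (not part of the original file): a hypothetical
   gdbstub-style caller toggling a breakpoint.  Using BP_GDB keeps the
   breakpoint at the head of the list, as noted above. */
#if 0
static int example_toggle_breakpoint(CPUState *env, target_ulong pc, int set)
{
    if (set) {
        return cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
    } else {
        return cpu_breakpoint_remove(env, pc, BP_GDB);
    }
}
#endif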
1405 4c3a88a2 bellard
1406 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1407 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1408 a1d1bb31 aliguori
{
1409 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1410 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1411 a1d1bb31 aliguori
1412 c0ce998e aliguori
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1413 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1414 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1415 a1d1bb31 aliguori
            return 0;
1416 a1d1bb31 aliguori
        }
1417 7d03f82f edgar_igl
    }
1418 a1d1bb31 aliguori
    return -ENOENT;
1419 a1d1bb31 aliguori
#else
1420 a1d1bb31 aliguori
    return -ENOSYS;
1421 7d03f82f edgar_igl
#endif
1422 7d03f82f edgar_igl
}
1423 7d03f82f edgar_igl
1424 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1425 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1426 4c3a88a2 bellard
{
1427 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1428 c0ce998e aliguori
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1429 d720b93d bellard
1430 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1431 a1d1bb31 aliguori
1432 a1d1bb31 aliguori
    qemu_free(breakpoint);
1433 a1d1bb31 aliguori
#endif
1434 a1d1bb31 aliguori
}
1435 a1d1bb31 aliguori
1436 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1437 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1438 a1d1bb31 aliguori
{
1439 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1440 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1441 a1d1bb31 aliguori
1442 c0ce998e aliguori
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1443 a1d1bb31 aliguori
        if (bp->flags & mask)
1444 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1445 c0ce998e aliguori
    }
1446 4c3a88a2 bellard
#endif
1447 4c3a88a2 bellard
}
1448 4c3a88a2 bellard
1449 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1450 c33a346e bellard
   CPU loop after each instruction */
1451 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1452 c33a346e bellard
{
1453 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1454 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1455 c33a346e bellard
        env->singlestep_enabled = enabled;
1456 e22a25c9 aliguori
        if (kvm_enabled())
1457 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1458 e22a25c9 aliguori
        else {
1459 e22a25c9 aliguori
            /* must flush all the translated code to avoid inconsistencies */
1460 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1461 e22a25c9 aliguori
            tb_flush(env);
1462 e22a25c9 aliguori
        }
1463 c33a346e bellard
    }
1464 c33a346e bellard
#endif
1465 c33a346e bellard
}
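
/* Illustrative sketch (not part of the original file): a hypothetical
   debugger front end enables single-stepping before resuming the CPU and
   disables it again once the EXCP_DEBUG exit has been handled. */
#if 0
static void example_step_once(CPUState *env)
{
    cpu_single_step(env, 1);    /* next CPU loop exit will be EXCP_DEBUG */
    /* ... resume the guest and wait for the debug exit ... */
    cpu_single_step(env, 0);
}
#endif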
1466 c33a346e bellard
1467 34865134 bellard
/* enable or disable low-level logging */
1468 34865134 bellard
void cpu_set_log(int log_flags)
1469 34865134 bellard
{
1470 34865134 bellard
    loglevel = log_flags;
1471 34865134 bellard
    if (loglevel && !logfile) {
1472 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1473 34865134 bellard
        if (!logfile) {
1474 34865134 bellard
            perror(logfilename);
1475 34865134 bellard
            _exit(1);
1476 34865134 bellard
        }
1477 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1478 9fa3e853 bellard
        /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1479 9fa3e853 bellard
        {
1480 b55266b5 blueswir1
            static char logfile_buf[4096];
1481 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1482 9fa3e853 bellard
        }
1483 9fa3e853 bellard
#else
1484 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1485 9fa3e853 bellard
#endif
1486 e735b91c pbrook
        log_append = 1;
1487 e735b91c pbrook
    }
1488 e735b91c pbrook
    if (!loglevel && logfile) {
1489 e735b91c pbrook
        fclose(logfile);
1490 e735b91c pbrook
        logfile = NULL;
1491 34865134 bellard
    }
1492 34865134 bellard
}
1493 34865134 bellard
1494 34865134 bellard
void cpu_set_log_filename(const char *filename)
1495 34865134 bellard
{
1496 34865134 bellard
    logfilename = strdup(filename);
1497 e735b91c pbrook
    if (logfile) {
1498 e735b91c pbrook
        fclose(logfile);
1499 e735b91c pbrook
        logfile = NULL;
1500 e735b91c pbrook
    }
1501 e735b91c pbrook
    cpu_set_log(loglevel);
1502 34865134 bellard
}
1503 c33a346e bellard
1504 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1505 ea041c0e bellard
{
1506 3098dba0 aurel32
#if defined(USE_NPTL)
1507 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1508 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1509 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds.  Often
1510 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
1511 3098dba0 aurel32
#else
1512 ea041c0e bellard
    TranslationBlock *tb;
1513 15a51156 aurel32
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1514 59817ccb bellard
1515 3098dba0 aurel32
    tb = env->current_tb;
1516 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1517 3098dba0 aurel32
       all the potentially executing TB */
1518 3098dba0 aurel32
    if (tb && !testandset(&interrupt_lock)) {
1519 3098dba0 aurel32
        env->current_tb = NULL;
1520 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1521 3098dba0 aurel32
        resetlock(&interrupt_lock);
1522 be214e6c aurel32
    }
1523 3098dba0 aurel32
#endif
1524 3098dba0 aurel32
}
1525 3098dba0 aurel32
1526 3098dba0 aurel32
/* mask must never be zero, except for the A20 change call */
1527 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1528 3098dba0 aurel32
{
1529 3098dba0 aurel32
    int old_mask;
1530 be214e6c aurel32
1531 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1532 68a79315 bellard
    env->interrupt_request |= mask;
1533 3098dba0 aurel32
1534 2e70f6ef pbrook
    if (use_icount) {
1535 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1536 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1537 2e70f6ef pbrook
        if (!can_do_io(env)
1538 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1539 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1540 2e70f6ef pbrook
        }
1541 2e70f6ef pbrook
#endif
1542 2e70f6ef pbrook
    } else {
1543 3098dba0 aurel32
        cpu_unlink_tb(env);
1544 ea041c0e bellard
    }
1545 ea041c0e bellard
}
1546 ea041c0e bellard
1547 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1548 b54ad049 bellard
{
1549 b54ad049 bellard
    env->interrupt_request &= ~mask;
1550 b54ad049 bellard
}
1551 b54ad049 bellard
1552 3098dba0 aurel32
void cpu_exit(CPUState *env)
1553 3098dba0 aurel32
{
1554 3098dba0 aurel32
    env->exit_request = 1;
1555 3098dba0 aurel32
    cpu_unlink_tb(env);
1556 3098dba0 aurel32
}
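
/* Illustrative sketch (not part of the original file): a hypothetical device
   model raising and later lowering a hardware interrupt line.  The mask bits
   (such as CPU_INTERRUPT_HARD) come from the CPU headers; cpu_interrupt()
   also kicks the CPU out of its current TB so the request is noticed. */
#if 0
static void example_set_irq_level(CPUState *env, int level)
{
    if (level) {
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
    }
}
#endif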
1557 3098dba0 aurel32
1558 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1559 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1560 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1561 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1562 f193c797 bellard
      "show target assembly code for each compiled TB" },
1563 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1564 57fec1fe bellard
      "show micro ops for each compiled TB" },
1565 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1566 e01a1157 blueswir1
      "show micro ops "
1567 e01a1157 blueswir1
#ifdef TARGET_I386
1568 e01a1157 blueswir1
      "before eflags optimization and "
1569 f193c797 bellard
#endif
1570 e01a1157 blueswir1
      "after liveness analysis" },
1571 f193c797 bellard
    { CPU_LOG_INT, "int",
1572 f193c797 bellard
      "show interrupts/exceptions in short format" },
1573 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1574 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1575 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1576 e91c8a77 ths
      "show CPU state before block translation" },
1577 f193c797 bellard
#ifdef TARGET_I386
1578 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1579 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1580 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1581 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1582 f193c797 bellard
#endif
1583 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1584 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1585 fd872598 bellard
      "show all i/o ports accesses" },
1586 8e3a9fd2 bellard
#endif
1587 f193c797 bellard
    { 0, NULL, NULL },
1588 f193c797 bellard
};
1589 f193c797 bellard
1590 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1591 f193c797 bellard
{
1592 f193c797 bellard
    if (strlen(s2) != n)
1593 f193c797 bellard
        return 0;
1594 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1595 f193c797 bellard
}
1596 3b46e624 ths
1597 f193c797 bellard
/* takes a comma-separated list of log masks. Returns 0 on error. */
1598 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1599 f193c797 bellard
{
1600 c7cd6a37 blueswir1
    const CPULogItem *item;
1601 f193c797 bellard
    int mask;
1602 f193c797 bellard
    const char *p, *p1;
1603 f193c797 bellard
1604 f193c797 bellard
    p = str;
1605 f193c797 bellard
    mask = 0;
1606 f193c797 bellard
    for(;;) {
1607 f193c797 bellard
        p1 = strchr(p, ',');
1608 f193c797 bellard
        if (!p1)
1609 f193c797 bellard
            p1 = p + strlen(p);
1610 8e3a9fd2 bellard
        if (cmp1(p, p1 - p, "all")) {
1611 8e3a9fd2 bellard
            for(item = cpu_log_items; item->mask != 0; item++) {
1612 8e3a9fd2 bellard
                mask |= item->mask;
1613 8e3a9fd2 bellard
            }
1614 8e3a9fd2 bellard
        } else {
1615 f193c797 bellard
            for(item = cpu_log_items; item->mask != 0; item++) {
1616 f193c797 bellard
                if (cmp1(p, p1 - p, item->name))
1617 f193c797 bellard
                    goto found;
1618 f193c797 bellard
            }
1619 f193c797 bellard
            return 0;
1620 8e3a9fd2 bellard
        }
1621 f193c797 bellard
    found:
1622 f193c797 bellard
        mask |= item->mask;
1623 f193c797 bellard
        if (*p1 != ',')
1624 f193c797 bellard
            break;
1625 f193c797 bellard
        p = p1 + 1;
1626 f193c797 bellard
    }
1627 f193c797 bellard
    return mask;
1628 f193c797 bellard
}
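
/* Illustrative sketch (not part of the original file): how a hypothetical
   "-d" option handler would turn a user-supplied string into log flags.
   A return value of 0 means an unknown item was named. */
#if 0
static void example_parse_log_option(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);        /* e.g. "in_asm,cpu" */
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}
#endif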
1629 ea041c0e bellard
1630 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1631 7501267e bellard
{
1632 7501267e bellard
    va_list ap;
1633 493ae1f0 pbrook
    va_list ap2;
1634 7501267e bellard
1635 7501267e bellard
    va_start(ap, fmt);
1636 493ae1f0 pbrook
    va_copy(ap2, ap);
1637 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1638 7501267e bellard
    vfprintf(stderr, fmt, ap);
1639 7501267e bellard
    fprintf(stderr, "\n");
1640 7501267e bellard
#ifdef TARGET_I386
1641 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1642 7fe48483 bellard
#else
1643 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1644 7501267e bellard
#endif
1645 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1646 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1647 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1648 93fcfe39 aliguori
        qemu_log("\n");
1649 f9373291 j_mayer
#ifdef TARGET_I386
1650 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1651 f9373291 j_mayer
#else
1652 93fcfe39 aliguori
        log_cpu_state(env, 0);
1653 f9373291 j_mayer
#endif
1654 31b1a7b4 aliguori
        qemu_log_flush();
1655 93fcfe39 aliguori
        qemu_log_close();
1656 924edcae balrog
    }
1657 493ae1f0 pbrook
    va_end(ap2);
1658 f9373291 j_mayer
    va_end(ap);
1659 7501267e bellard
    abort();
1660 7501267e bellard
}
1661 7501267e bellard
1662 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1663 c5be9f08 ths
{
1664 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1665 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1666 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1667 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1668 5a38f081 aliguori
    CPUBreakpoint *bp;
1669 5a38f081 aliguori
    CPUWatchpoint *wp;
1670 5a38f081 aliguori
#endif
1671 5a38f081 aliguori
1672 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1673 5a38f081 aliguori
1674 5a38f081 aliguori
    /* Preserve chaining and index. */
1675 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1676 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1677 5a38f081 aliguori
1678 5a38f081 aliguori
    /* Clone all break/watchpoints.
1679 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1680 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1681 5a38f081 aliguori
    TAILQ_INIT(&new_env->breakpoints);
1682 5a38f081 aliguori
    TAILQ_INIT(&new_env->watchpoints);
1683 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1684 5a38f081 aliguori
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1685 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1686 5a38f081 aliguori
    }
1687 5a38f081 aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1688 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1689 5a38f081 aliguori
                              wp->flags, NULL);
1690 5a38f081 aliguori
    }
1691 5a38f081 aliguori
#endif
1692 5a38f081 aliguori
1693 c5be9f08 ths
    return new_env;
1694 c5be9f08 ths
}
1695 c5be9f08 ths
1696 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1697 0124311e bellard
1698 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1699 5c751e99 edgar_igl
{
1700 5c751e99 edgar_igl
    unsigned int i;
1701 5c751e99 edgar_igl
1702 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1703 5c751e99 edgar_igl
       overlap the flushed page.  */
1704 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1705 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1706 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1707 5c751e99 edgar_igl
1708 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1709 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1710 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1711 5c751e99 edgar_igl
}
1712 5c751e99 edgar_igl
1713 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1714 ee8b7021 bellard
   implemented yet) */
1715 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1716 33417e70 bellard
{
1717 33417e70 bellard
    int i;
1718 0124311e bellard
1719 9fa3e853 bellard
#if defined(DEBUG_TLB)
1720 9fa3e853 bellard
    printf("tlb_flush:\n");
1721 9fa3e853 bellard
#endif
1722 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1723 0124311e bellard
       links while we are modifying them */
1724 0124311e bellard
    env->current_tb = NULL;
1725 0124311e bellard
1726 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1727 84b7b8e7 bellard
        env->tlb_table[0][i].addr_read = -1;
1728 84b7b8e7 bellard
        env->tlb_table[0][i].addr_write = -1;
1729 84b7b8e7 bellard
        env->tlb_table[0][i].addr_code = -1;
1730 84b7b8e7 bellard
        env->tlb_table[1][i].addr_read = -1;
1731 84b7b8e7 bellard
        env->tlb_table[1][i].addr_write = -1;
1732 84b7b8e7 bellard
        env->tlb_table[1][i].addr_code = -1;
1733 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1734 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_read = -1;
1735 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_write = -1;
1736 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_code = -1;
1737 e37e6ee6 aurel32
#endif
1738 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1739 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_read = -1;
1740 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_write = -1;
1741 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_code = -1;
1742 6fa4cea9 j_mayer
#endif
1743 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1744 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_read = -1;
1745 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_write = -1;
1746 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_code = -1;
1747 6fa4cea9 j_mayer
#endif
1748 e37e6ee6 aurel32
1749 33417e70 bellard
    }
1750 9fa3e853 bellard
1751 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1752 9fa3e853 bellard
1753 0a962c02 bellard
#ifdef USE_KQEMU
1754 0a962c02 bellard
    if (env->kqemu_enabled) {
1755 0a962c02 bellard
        kqemu_flush(env, flush_global);
1756 0a962c02 bellard
    }
1757 0a962c02 bellard
#endif
1758 e3db7226 bellard
    tlb_flush_count++;
1759 33417e70 bellard
}
1760 33417e70 bellard
1761 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1762 61382a50 bellard
{
1763 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1764 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1765 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1766 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1767 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1768 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1769 84b7b8e7 bellard
        tlb_entry->addr_read = -1;
1770 84b7b8e7 bellard
        tlb_entry->addr_write = -1;
1771 84b7b8e7 bellard
        tlb_entry->addr_code = -1;
1772 84b7b8e7 bellard
    }
1773 61382a50 bellard
}
1774 61382a50 bellard
1775 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
1776 33417e70 bellard
{
1777 8a40a180 bellard
    int i;
1778 0124311e bellard
1779 9fa3e853 bellard
#if defined(DEBUG_TLB)
1780 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1781 9fa3e853 bellard
#endif
1782 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1783 0124311e bellard
       links while we are modifying them */
1784 0124311e bellard
    env->current_tb = NULL;
1785 61382a50 bellard
1786 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
1787 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1788 84b7b8e7 bellard
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1789 84b7b8e7 bellard
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1790 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1791 6fa4cea9 j_mayer
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1792 e37e6ee6 aurel32
#endif
1793 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1794 6fa4cea9 j_mayer
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1795 6fa4cea9 j_mayer
#endif
1796 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1797 e37e6ee6 aurel32
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1798 6fa4cea9 j_mayer
#endif
1799 0124311e bellard
1800 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
1801 9fa3e853 bellard
1802 0a962c02 bellard
#ifdef USE_KQEMU
1803 0a962c02 bellard
    if (env->kqemu_enabled) {
1804 0a962c02 bellard
        kqemu_flush_page(env, addr);
1805 0a962c02 bellard
    }
1806 0a962c02 bellard
#endif
1807 9fa3e853 bellard
}
1808 9fa3e853 bellard
1809 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1810 9fa3e853 bellard
   can be detected */
1811 6a00d601 bellard
static void tlb_protect_code(ram_addr_t ram_addr)
1812 9fa3e853 bellard
{
1813 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1814 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1815 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1816 9fa3e853 bellard
}
1817 9fa3e853 bellard
1818 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1819 3a7d929e bellard
   tested for self-modifying code */
1820 5fafdf24 ths
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1821 3a7d929e bellard
                                    target_ulong vaddr)
1822 9fa3e853 bellard
{
1823 3a7d929e bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1824 1ccde1cb bellard
}
1825 1ccde1cb bellard
1826 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1827 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
1828 1ccde1cb bellard
{
1829 1ccde1cb bellard
    unsigned long addr;
1830 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1831 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1832 1ccde1cb bellard
        if ((addr - start) < length) {
1833 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1834 1ccde1cb bellard
        }
1835 1ccde1cb bellard
    }
1836 1ccde1cb bellard
}
1837 1ccde1cb bellard
1838 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
1839 3a7d929e bellard
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1840 0a962c02 bellard
                                     int dirty_flags)
1841 1ccde1cb bellard
{
1842 1ccde1cb bellard
    CPUState *env;
1843 4f2ac237 bellard
    unsigned long length, start1;
1844 0a962c02 bellard
    int i, mask, len;
1845 0a962c02 bellard
    uint8_t *p;
1846 1ccde1cb bellard
1847 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
1848 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
1849 1ccde1cb bellard
1850 1ccde1cb bellard
    length = end - start;
1851 1ccde1cb bellard
    if (length == 0)
1852 1ccde1cb bellard
        return;
1853 0a962c02 bellard
    len = length >> TARGET_PAGE_BITS;
1854 3a7d929e bellard
#ifdef USE_KQEMU
1855 6a00d601 bellard
    /* XXX: should not depend on cpu context */
1856 6a00d601 bellard
    env = first_cpu;
1857 3a7d929e bellard
    if (env->kqemu_enabled) {
1858 f23db169 bellard
        ram_addr_t addr;
1859 f23db169 bellard
        addr = start;
1860 f23db169 bellard
        for(i = 0; i < len; i++) {
1861 f23db169 bellard
            kqemu_set_notdirty(env, addr);
1862 f23db169 bellard
            addr += TARGET_PAGE_SIZE;
1863 f23db169 bellard
        }
1864 3a7d929e bellard
    }
1865 3a7d929e bellard
#endif
1866 f23db169 bellard
    mask = ~dirty_flags;
1867 f23db169 bellard
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1868 f23db169 bellard
    for(i = 0; i < len; i++)
1869 f23db169 bellard
        p[i] &= mask;
1870 f23db169 bellard
1871 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
1872 1ccde1cb bellard
       when accessing the range */
1873 5579c7f3 pbrook
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1874 5579c7f3 pbrook
    /* Chek that we don't span multiple blocks - this breaks the
1875 5579c7f3 pbrook
       address comparisons below.  */
1876 5579c7f3 pbrook
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1877 5579c7f3 pbrook
            != (end - 1) - start) {
1878 5579c7f3 pbrook
        abort();
1879 5579c7f3 pbrook
    }
1880 5579c7f3 pbrook
1881 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1882 6a00d601 bellard
        for(i = 0; i < CPU_TLB_SIZE; i++)
1883 84b7b8e7 bellard
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1884 6a00d601 bellard
        for(i = 0; i < CPU_TLB_SIZE; i++)
1885 84b7b8e7 bellard
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1886 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1887 6fa4cea9 j_mayer
        for(i = 0; i < CPU_TLB_SIZE; i++)
1888 6fa4cea9 j_mayer
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1889 e37e6ee6 aurel32
#endif
1890 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1891 6fa4cea9 j_mayer
        for(i = 0; i < CPU_TLB_SIZE; i++)
1892 6fa4cea9 j_mayer
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1893 6fa4cea9 j_mayer
#endif
1894 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1895 e37e6ee6 aurel32
        for(i = 0; i < CPU_TLB_SIZE; i++)
1896 e37e6ee6 aurel32
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1897 6fa4cea9 j_mayer
#endif
1898 6a00d601 bellard
    }
1899 1ccde1cb bellard
}
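
/* Illustrative sketch (not part of the original file): the dirty-log round
   trip a hypothetical display device would perform -- scan for pages carrying
   its flag, redraw what changed, then clear only that flag for the range.
   VGA_DIRTY_FLAG and cpu_physical_memory_get_dirty() are assumed to be the
   names used elsewhere in this tree for per-client dirty tracking. */
#if 0
static void example_refresh(ram_addr_t vram_start, ram_addr_t vram_end)
{
    ram_addr_t addr;

    for (addr = vram_start; addr < vram_end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(vram_start, vram_end, VGA_DIRTY_FLAG);
}
#endif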
1900 1ccde1cb bellard
1901 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
1902 74576198 aliguori
{
1903 74576198 aliguori
    in_migration = enable;
1904 74576198 aliguori
    return 0;
1905 74576198 aliguori
}
1906 74576198 aliguori
1907 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
1908 74576198 aliguori
{
1909 74576198 aliguori
    return in_migration;
1910 74576198 aliguori
}
1911 74576198 aliguori
1912 2bec46dc aliguori
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1913 2bec46dc aliguori
{
1914 2bec46dc aliguori
    if (kvm_enabled())
1915 2bec46dc aliguori
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1916 2bec46dc aliguori
}
1917 2bec46dc aliguori
1918 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1919 3a7d929e bellard
{
1920 3a7d929e bellard
    ram_addr_t ram_addr;
1921 5579c7f3 pbrook
    void *p;
1922 3a7d929e bellard
1923 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1924 5579c7f3 pbrook
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1925 5579c7f3 pbrook
            + tlb_entry->addend);
1926 5579c7f3 pbrook
        ram_addr = qemu_ram_addr_from_host(p);
1927 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1928 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1929 3a7d929e bellard
        }
1930 3a7d929e bellard
    }
1931 3a7d929e bellard
}
1932 3a7d929e bellard
1933 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
1934 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
1935 3a7d929e bellard
{
1936 3a7d929e bellard
    int i;
1937 3a7d929e bellard
    for(i = 0; i < CPU_TLB_SIZE; i++)
1938 84b7b8e7 bellard
        tlb_update_dirty(&env->tlb_table[0][i]);
1939 3a7d929e bellard
    for(i = 0; i < CPU_TLB_SIZE; i++)
1940 84b7b8e7 bellard
        tlb_update_dirty(&env->tlb_table[1][i]);
1941 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1942 6fa4cea9 j_mayer
    for(i = 0; i < CPU_TLB_SIZE; i++)
1943 6fa4cea9 j_mayer
        tlb_update_dirty(&env->tlb_table[2][i]);
1944 e37e6ee6 aurel32
#endif
1945 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1946 6fa4cea9 j_mayer
    for(i = 0; i < CPU_TLB_SIZE; i++)
1947 6fa4cea9 j_mayer
        tlb_update_dirty(&env->tlb_table[3][i]);
1948 6fa4cea9 j_mayer
#endif
1949 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1950 e37e6ee6 aurel32
    for(i = 0; i < CPU_TLB_SIZE; i++)
1951 e37e6ee6 aurel32
        tlb_update_dirty(&env->tlb_table[4][i]);
1952 6fa4cea9 j_mayer
#endif
1953 3a7d929e bellard
}
1954 3a7d929e bellard
1955 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1956 1ccde1cb bellard
{
1957 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1958 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
1959 1ccde1cb bellard
}
1960 1ccde1cb bellard
1961 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
1962 0f459d16 pbrook
   so that it is no longer dirty */
1963 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1964 1ccde1cb bellard
{
1965 1ccde1cb bellard
    int i;
1966 1ccde1cb bellard
1967 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
1968 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1969 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1970 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1971 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1972 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1973 e37e6ee6 aurel32
#endif
1974 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1975 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1976 6fa4cea9 j_mayer
#endif
1977 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1978 e37e6ee6 aurel32
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
1979 6fa4cea9 j_mayer
#endif
1980 9fa3e853 bellard
}
1981 9fa3e853 bellard
1982 59817ccb bellard
/* add a new TLB entry. At most one entry for a given virtual address
1983 59817ccb bellard
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1984 59817ccb bellard
   (can only happen in non-SOFTMMU mode for I/O pages or pages
1985 59817ccb bellard
   conflicting with the host address space). */
1986 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1987 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
1988 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
1989 9fa3e853 bellard
{
1990 92e873b9 bellard
    PhysPageDesc *p;
1991 4f2ac237 bellard
    unsigned long pd;
1992 9fa3e853 bellard
    unsigned int index;
1993 4f2ac237 bellard
    target_ulong address;
1994 0f459d16 pbrook
    target_ulong code_address;
1995 108c49b8 bellard
    target_phys_addr_t addend;
1996 9fa3e853 bellard
    int ret;
1997 84b7b8e7 bellard
    CPUTLBEntry *te;
1998 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1999 0f459d16 pbrook
    target_phys_addr_t iotlb;
2000 9fa3e853 bellard
2001 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2002 9fa3e853 bellard
    if (!p) {
2003 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
2004 9fa3e853 bellard
    } else {
2005 9fa3e853 bellard
        pd = p->phys_offset;
2006 9fa3e853 bellard
    }
2007 9fa3e853 bellard
#if defined(DEBUG_TLB)
2008 6ebbf390 j_mayer
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2009 6ebbf390 j_mayer
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2010 9fa3e853 bellard
#endif
2011 9fa3e853 bellard
2012 9fa3e853 bellard
    ret = 0;
2013 0f459d16 pbrook
    address = vaddr;
2014 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2015 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2016 0f459d16 pbrook
        address |= TLB_MMIO;
2017 0f459d16 pbrook
    }
2018 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2019 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2020 0f459d16 pbrook
        /* Normal RAM.  */
2021 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2022 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2023 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
2024 0f459d16 pbrook
        else
2025 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
2026 0f459d16 pbrook
    } else {
2027 0f459d16 pbrook
        /* IO handlers are currently passed a physical address.
2028 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2029 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2030 0f459d16 pbrook
           and avoid full address decoding in every device.
2031 0f459d16 pbrook
           We can't use the high bits of pd for this because
2032 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2033 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2034 8da3ff18 pbrook
        if (p) {
2035 8da3ff18 pbrook
            iotlb += p->region_offset;
2036 8da3ff18 pbrook
        } else {
2037 8da3ff18 pbrook
            iotlb += paddr;
2038 8da3ff18 pbrook
        }
2039 0f459d16 pbrook
    }
2040 0f459d16 pbrook
2041 0f459d16 pbrook
    code_address = address;
2042 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2043 0f459d16 pbrook
       watchpoint trap routines.  */
2044 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2045 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2046 0f459d16 pbrook
            iotlb = io_mem_watch + paddr;
2047 0f459d16 pbrook
            /* TODO: The memory case can be optimized by not trapping
2048 0f459d16 pbrook
               reads of pages with a write breakpoint.  */
2049 0f459d16 pbrook
            address |= TLB_MMIO;
2050 6658ffb8 pbrook
        }
2051 0f459d16 pbrook
    }
2052 d79acba4 balrog
2053 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2054 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2055 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2056 0f459d16 pbrook
    te->addend = addend - vaddr;
2057 0f459d16 pbrook
    if (prot & PAGE_READ) {
2058 0f459d16 pbrook
        te->addr_read = address;
2059 0f459d16 pbrook
    } else {
2060 0f459d16 pbrook
        te->addr_read = -1;
2061 0f459d16 pbrook
    }
2062 5c751e99 edgar_igl
2063 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2064 0f459d16 pbrook
        te->addr_code = code_address;
2065 0f459d16 pbrook
    } else {
2066 0f459d16 pbrook
        te->addr_code = -1;
2067 0f459d16 pbrook
    }
2068 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2069 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2070 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2071 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2072 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2073 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2074 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2075 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2076 9fa3e853 bellard
        } else {
2077 0f459d16 pbrook
            te->addr_write = address;
2078 9fa3e853 bellard
        }
2079 0f459d16 pbrook
    } else {
2080 0f459d16 pbrook
        te->addr_write = -1;
2081 9fa3e853 bellard
    }
2082 9fa3e853 bellard
    return ret;
2083 9fa3e853 bellard
}
2084 9fa3e853 bellard
2085 0124311e bellard
#else
2086 0124311e bellard
2087 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2088 0124311e bellard
{
2089 0124311e bellard
}
2090 0124311e bellard
2091 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2092 0124311e bellard
{
2093 0124311e bellard
}
2094 0124311e bellard
2095 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2096 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
2097 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2098 9fa3e853 bellard
{
2099 9fa3e853 bellard
    return 0;
2100 9fa3e853 bellard
}
2101 0124311e bellard
2102 9fa3e853 bellard
/* dump memory mappings */
2103 9fa3e853 bellard
void page_dump(FILE *f)
2104 33417e70 bellard
{
2105 9fa3e853 bellard
    unsigned long start, end;
2106 9fa3e853 bellard
    int i, j, prot, prot1;
2107 9fa3e853 bellard
    PageDesc *p;
2108 33417e70 bellard
2109 9fa3e853 bellard
    fprintf(f, "%-8s %-8s %-8s %s\n",
2110 9fa3e853 bellard
            "start", "end", "size", "prot");
2111 9fa3e853 bellard
    start = -1;
2112 9fa3e853 bellard
    end = -1;
2113 9fa3e853 bellard
    prot = 0;
2114 9fa3e853 bellard
    for(i = 0; i <= L1_SIZE; i++) {
2115 9fa3e853 bellard
        if (i < L1_SIZE)
2116 9fa3e853 bellard
            p = l1_map[i];
2117 9fa3e853 bellard
        else
2118 9fa3e853 bellard
            p = NULL;
2119 9fa3e853 bellard
        for(j = 0;j < L2_SIZE; j++) {
2120 9fa3e853 bellard
            if (!p)
2121 9fa3e853 bellard
                prot1 = 0;
2122 9fa3e853 bellard
            else
2123 9fa3e853 bellard
                prot1 = p[j].flags;
2124 9fa3e853 bellard
            if (prot1 != prot) {
2125 9fa3e853 bellard
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2126 9fa3e853 bellard
                if (start != -1) {
2127 9fa3e853 bellard
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2128 5fafdf24 ths
                            start, end, end - start,
2129 9fa3e853 bellard
                            prot & PAGE_READ ? 'r' : '-',
2130 9fa3e853 bellard
                            prot & PAGE_WRITE ? 'w' : '-',
2131 9fa3e853 bellard
                            prot & PAGE_EXEC ? 'x' : '-');
2132 9fa3e853 bellard
                }
2133 9fa3e853 bellard
                if (prot1 != 0)
2134 9fa3e853 bellard
                    start = end;
2135 9fa3e853 bellard
                else
2136 9fa3e853 bellard
                    start = -1;
2137 9fa3e853 bellard
                prot = prot1;
2138 9fa3e853 bellard
            }
2139 9fa3e853 bellard
            if (!p)
2140 9fa3e853 bellard
                break;
2141 9fa3e853 bellard
        }
2142 33417e70 bellard
    }
2143 33417e70 bellard
}
2144 33417e70 bellard
2145 53a5960a pbrook
int page_get_flags(target_ulong address)
2146 33417e70 bellard
{
2147 9fa3e853 bellard
    PageDesc *p;
2148 9fa3e853 bellard
2149 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2150 33417e70 bellard
    if (!p)
2151 9fa3e853 bellard
        return 0;
2152 9fa3e853 bellard
    return p->flags;
2153 9fa3e853 bellard
}
2154 9fa3e853 bellard
2155 9fa3e853 bellard
/* modify the flags of a page and invalidate the code if
2156 9fa3e853 bellard
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2157 9fa3e853 bellard
   depending on PAGE_WRITE */
2158 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2159 9fa3e853 bellard
{
2160 9fa3e853 bellard
    PageDesc *p;
2161 53a5960a pbrook
    target_ulong addr;
2162 9fa3e853 bellard
2163 c8a706fe pbrook
    /* mmap_lock should already be held.  */
2164 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2165 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2166 9fa3e853 bellard
    if (flags & PAGE_WRITE)
2167 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2168 9fa3e853 bellard
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2169 9fa3e853 bellard
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2170 17e2377a pbrook
        /* We may be called for host regions that are outside guest
2171 17e2377a pbrook
           address space.  */
2172 17e2377a pbrook
        if (!p)
2173 17e2377a pbrook
            return;
2174 9fa3e853 bellard
        /* if the write protection is set, then we invalidate the code
2175 9fa3e853 bellard
           inside */
2176 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2177 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2178 9fa3e853 bellard
            p->first_tb) {
2179 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2180 9fa3e853 bellard
        }
2181 9fa3e853 bellard
        p->flags = flags;
2182 9fa3e853 bellard
    }
2183 33417e70 bellard
}
2184 33417e70 bellard
2185 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2186 3d97b40b ths
{
2187 3d97b40b ths
    PageDesc *p;
2188 3d97b40b ths
    target_ulong end;
2189 3d97b40b ths
    target_ulong addr;
2190 3d97b40b ths
2191 55f280c9 balrog
    if (start + len < start)
2192 55f280c9 balrog
        /* we've wrapped around */
2193 55f280c9 balrog
        return -1;
2194 55f280c9 balrog
2195 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2196 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2197 3d97b40b ths
2198 3d97b40b ths
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2199 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2200 3d97b40b ths
        if( !p )
2201 3d97b40b ths
            return -1;
2202 3d97b40b ths
        if( !(p->flags & PAGE_VALID) )
2203 3d97b40b ths
            return -1;
2204 3d97b40b ths
2205 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2206 3d97b40b ths
            return -1;
2207 dae3270c bellard
        if (flags & PAGE_WRITE) {
2208 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2209 dae3270c bellard
                return -1;
2210 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2211 dae3270c bellard
               contains translated code */
2212 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2213 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2214 dae3270c bellard
                    return -1;
2215 dae3270c bellard
            }
2216 dae3270c bellard
            return 0;
2217 dae3270c bellard
        }
2218 3d97b40b ths
    }
2219 3d97b40b ths
    return 0;
2220 3d97b40b ths
}
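
/* Illustrative sketch (not part of the original file): how a hypothetical
   user-mode syscall path would use these helpers -- register a freshly
   mapped region, then validate a guest buffer before touching it. */
#if 0
static int example_register_and_check(target_ulong start, target_ulong len)
{
    mmap_lock();
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
    mmap_unlock();

    /* -1 means part of the range is unmapped or lacks the requested access */
    return page_check_range(start, len, PAGE_READ | PAGE_WRITE);
}
#endif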
2221 3d97b40b ths
2222 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2223 9fa3e853 bellard
   page. Return TRUE if the fault was successfully handled. */
2224 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2225 9fa3e853 bellard
{
2226 9fa3e853 bellard
    unsigned int page_index, prot, pindex;
2227 9fa3e853 bellard
    PageDesc *p, *p1;
2228 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2229 9fa3e853 bellard
2230 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2231 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2232 c8a706fe pbrook
       practice it seems to be ok.  */
2233 c8a706fe pbrook
    mmap_lock();
2234 c8a706fe pbrook
2235 83fb7adf bellard
    host_start = address & qemu_host_page_mask;
2236 9fa3e853 bellard
    page_index = host_start >> TARGET_PAGE_BITS;
2237 9fa3e853 bellard
    p1 = page_find(page_index);
2238 c8a706fe pbrook
    if (!p1) {
2239 c8a706fe pbrook
        mmap_unlock();
2240 9fa3e853 bellard
        return 0;
2241 c8a706fe pbrook
    }
2242 83fb7adf bellard
    host_end = host_start + qemu_host_page_size;
2243 9fa3e853 bellard
    p = p1;
2244 9fa3e853 bellard
    prot = 0;
2245 9fa3e853 bellard
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2246 9fa3e853 bellard
        prot |= p->flags;
2247 9fa3e853 bellard
        p++;
2248 9fa3e853 bellard
    }
2249 9fa3e853 bellard
    /* if the page was really writable, then we change its
2250 9fa3e853 bellard
       protection back to writable */
2251 9fa3e853 bellard
    if (prot & PAGE_WRITE_ORG) {
2252 9fa3e853 bellard
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2253 9fa3e853 bellard
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2254 5fafdf24 ths
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2255 9fa3e853 bellard
                     (prot & PAGE_BITS) | PAGE_WRITE);
2256 9fa3e853 bellard
            p1[pindex].flags |= PAGE_WRITE;
2257 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2258 9fa3e853 bellard
               the corresponding translated code. */
2259 d720b93d bellard
            tb_invalidate_phys_page(address, pc, puc);
2260 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2261 9fa3e853 bellard
            tb_invalidate_check(address);
2262 9fa3e853 bellard
#endif
2263 c8a706fe pbrook
            mmap_unlock();
2264 9fa3e853 bellard
            return 1;
2265 9fa3e853 bellard
        }
2266 9fa3e853 bellard
    }
2267 c8a706fe pbrook
    mmap_unlock();
2268 9fa3e853 bellard
    return 0;
2269 9fa3e853 bellard
}
2270 9fa3e853 bellard
2271 6a00d601 bellard
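
/* Usage sketch (illustrative): the host SEGV handler for user-mode emulation
   typically forwards write faults here and retries the access on success,
   roughly

       if (is_write && page_unprotect(h2g(host_fault_addr), pc, uc))
           return 1;   // fault was caused by our own write protection

   (h2g() converts a host address back to a guest address; names are
   schematic, the real logic lives in the per-host signal handling code.) */
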
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
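
/* Worked example (illustrative, assuming 4 KB target pages): registering
   start_addr = 0x1000100 with orig_size = 0x80 hits the first page with
   addr == start_addr, so start_addr2 = 0x100 and end_addr2 = 0x17f.  Both
   lie strictly inside the page, so need_subpage is set and the range is
   handled by a subpage container instead of a whole-page mapping. */
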
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
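
/* Usage sketch (illustrative): region_offset controls the address the io
   callbacks see.  With region_offset == 0 the handlers get offsets
   0..size-1 from the window base; passing the window base itself makes
   them see full guest physical addresses, e.g.

       cpu_register_physical_memory_offset(0x10000000, 0x1000, io, 0);
       cpu_register_physical_memory_offset(0x20000000, 0x1000, io, 0x20000000);

   where 'io' is a hypothetical value returned by cpu_register_io_memory().
   Plain cpu_register_physical_memory() is the common region_offset == 0
   case. */
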
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
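
/* Usage sketch (illustrative): board/device code allocates guest RAM here
   and then maps it into the physical address space, typically

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

   (names are schematic; ram_size would come from the machine setup). */
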
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    return phys_ram_base + addr;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    return (uint8_t *)ptr - phys_ram_base;
}
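
/* Usage sketch (illustrative): a display device that owns its VRAM block
   may keep a direct host pointer to it,

       ram_addr_t vram_offset = qemu_ram_alloc(vram_size);
       uint8_t *vram = qemu_get_ram_ptr(vram_offset);

   whereas guest-visible DMA should go through cpu_physical_memory_rw()
   or cpu_physical_memory_map() as the comment above says. */
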
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
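
/* Writes to RAM pages that still contain translated code are routed to the
   IO_MEM_NOTDIRTY handlers below: they invalidate any TB covering the
   written bytes, perform the store, update the dirty bitmap, and switch the
   page back to the fast RAM path (tlb_set_dirty) once no translated code
   remains on it. */
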
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
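
/* Usage sketch (illustrative): watchpoints are installed on a CPU with
   cpu_watchpoint_insert(), roughly what the gdb stub does for a 4-byte
   write watch:

       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);

   the TLB trick then forces accesses to watched pages through the
   watch_mem_* handlers above so check_watchpoint() can raise EXCP_DEBUG.
   (Treat the exact signature/flags as a sketch for this revision.) */
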
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                         region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
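
/* Usage sketch (illustrative): a device model registers its MMIO callbacks
   and maps them into the physical address space, along the lines of

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,    // hypothetical handlers
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);

   A NULL table entry marks the io zone as IO_MEM_SUBWIDTH, so only the
   implemented access sizes are dispatched (see subpage_register() above). */
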
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
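
/* Usage sketch (illustrative): device emulation normally goes through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers around
   this function for guest DMA, e.g.

       uint32_t desc[4];
       cpu_physical_memory_read(desc_paddr, (uint8_t *)desc, sizeof(desc));
       cpu_physical_memory_write(dst_paddr, payload, payload_len);

   (desc_paddr/dst_paddr/payload are hypothetical.)  RAM and MMIO targets,
   as well as unaligned io accesses, are handled here so callers need not
   distinguish them. */
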
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3249 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3250 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
3251 6d16c2f8 aliguori
 */
3252 6d16c2f8 aliguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3253 6d16c2f8 aliguori
                               int is_write, target_phys_addr_t access_len)
3254 6d16c2f8 aliguori
{
3255 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
3256 6d16c2f8 aliguori
        if (is_write) {
3257 5579c7f3 pbrook
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3258 6d16c2f8 aliguori
            while (access_len) {
3259 6d16c2f8 aliguori
                unsigned l;
3260 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
3261 6d16c2f8 aliguori
                if (l > access_len)
3262 6d16c2f8 aliguori
                    l = access_len;
3263 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
3264 6d16c2f8 aliguori
                    /* invalidate code */
3265 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3266 6d16c2f8 aliguori
                    /* set dirty bit */
3267 6d16c2f8 aliguori
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3268 6d16c2f8 aliguori
                        (0xff & ~CODE_DIRTY_FLAG);
3269 6d16c2f8 aliguori
                }
3270 6d16c2f8 aliguori
                addr1 += l;
3271 6d16c2f8 aliguori
                access_len -= l;
3272 6d16c2f8 aliguori
            }
3273 6d16c2f8 aliguori
        }
3274 6d16c2f8 aliguori
        return;
3275 6d16c2f8 aliguori
    }
3276 6d16c2f8 aliguori
    if (is_write) {
3277 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3278 6d16c2f8 aliguori
    }
3279 6d16c2f8 aliguori
    qemu_free(bounce.buffer);
3280 6d16c2f8 aliguori
    bounce.buffer = NULL;
3281 ba223c29 aliguori
    cpu_notify_map_clients();
3282 6d16c2f8 aliguori
}
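/* [Editorial sketch, not part of exec.c] Minimal illustration of the
 * map/unmap contract implemented above, assuming the declarations from this
 * tree's cpu-common.h are in scope.  The mapping may be shortened (or fall
 * back to the single bounce buffer for non-RAM regions), so the returned
 * length must be checked and passed back to unmap as access_len.  The helper
 * name and its use are hypothetical.
 */
static void example_dma_write(target_phys_addr_t gpa, const uint8_t *src,
                              target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);
        if (!host || plen == 0) {
            break;              /* nothing mappable, e.g. bounce buffer busy */
        }
        memcpy(host, src, plen);                     /* fill the mapped window */
        cpu_physical_memory_unmap(host, plen, 1, plen); /* marks RAM dirty */
        gpa += plen;
        src += plen;
        size -= plen;
    }
}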
3283 d0ecd2aa bellard
3284 8df1cd07 bellard
/* warning: addr must be 32-bit aligned */
3285 8df1cd07 bellard
uint32_t ldl_phys(target_phys_addr_t addr)
3286 8df1cd07 bellard
{
3287 8df1cd07 bellard
    int io_index;
3288 8df1cd07 bellard
    uint8_t *ptr;
3289 8df1cd07 bellard
    uint32_t val;
3290 8df1cd07 bellard
    unsigned long pd;
3291 8df1cd07 bellard
    PhysPageDesc *p;
3292 8df1cd07 bellard
3293 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3294 8df1cd07 bellard
    if (!p) {
3295 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3296 8df1cd07 bellard
    } else {
3297 8df1cd07 bellard
        pd = p->phys_offset;
3298 8df1cd07 bellard
    }
3299 3b46e624 ths
3300 5fafdf24 ths
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3301 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3302 8df1cd07 bellard
        /* I/O case */
3303 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3304 8da3ff18 pbrook
        if (p)
3305 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3306 8df1cd07 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3307 8df1cd07 bellard
    } else {
3308 8df1cd07 bellard
        /* RAM case */
3309 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3310 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
3311 8df1cd07 bellard
        val = ldl_p(ptr);
3312 8df1cd07 bellard
    }
3313 8df1cd07 bellard
    return val;
3314 8df1cd07 bellard
}
3315 8df1cd07 bellard
3316 84b7b8e7 bellard
/* warning: addr must be 64-bit aligned */
3317 84b7b8e7 bellard
uint64_t ldq_phys(target_phys_addr_t addr)
3318 84b7b8e7 bellard
{
3319 84b7b8e7 bellard
    int io_index;
3320 84b7b8e7 bellard
    uint8_t *ptr;
3321 84b7b8e7 bellard
    uint64_t val;
3322 84b7b8e7 bellard
    unsigned long pd;
3323 84b7b8e7 bellard
    PhysPageDesc *p;
3324 84b7b8e7 bellard
3325 84b7b8e7 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3326 84b7b8e7 bellard
    if (!p) {
3327 84b7b8e7 bellard
        pd = IO_MEM_UNASSIGNED;
3328 84b7b8e7 bellard
    } else {
3329 84b7b8e7 bellard
        pd = p->phys_offset;
3330 84b7b8e7 bellard
    }
3331 3b46e624 ths
3332 2a4188a3 bellard
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3333 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3334 84b7b8e7 bellard
        /* I/O case */
3335 84b7b8e7 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3336 8da3ff18 pbrook
        if (p)
3337 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3338 84b7b8e7 bellard
#ifdef TARGET_WORDS_BIGENDIAN
3339 84b7b8e7 bellard
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3340 84b7b8e7 bellard
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3341 84b7b8e7 bellard
#else
3342 84b7b8e7 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3343 84b7b8e7 bellard
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3344 84b7b8e7 bellard
#endif
3345 84b7b8e7 bellard
    } else {
3346 84b7b8e7 bellard
        /* RAM case */
3347 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3348 84b7b8e7 bellard
            (addr & ~TARGET_PAGE_MASK);
3349 84b7b8e7 bellard
        val = ldq_p(ptr);
3350 84b7b8e7 bellard
    }
3351 84b7b8e7 bellard
    return val;
3352 84b7b8e7 bellard
}
3353 84b7b8e7 bellard
3354 aab33094 bellard
/* XXX: optimize */
3355 aab33094 bellard
uint32_t ldub_phys(target_phys_addr_t addr)
3356 aab33094 bellard
{
3357 aab33094 bellard
    uint8_t val;
3358 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3359 aab33094 bellard
    return val;
3360 aab33094 bellard
}
3361 aab33094 bellard
3362 aab33094 bellard
/* XXX: optimize */
3363 aab33094 bellard
uint32_t lduw_phys(target_phys_addr_t addr)
3364 aab33094 bellard
{
3365 aab33094 bellard
    uint16_t val;
3366 aab33094 bellard
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3367 aab33094 bellard
    return tswap16(val);
3368 aab33094 bellard
}
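/* [Editorial sketch, not part of exec.c] The ld*_phys helpers above provide
 * word-sized reads of guest-physical memory: RAM is accessed through
 * qemu_get_ram_ptr(), everything else goes through the io_mem_read callbacks
 * (ldq_phys issuing two 32-bit accesses).  A device model or debug helper can
 * therefore peek at guest structures directly; the function and the field
 * offsets below are illustrative only.
 */
static void example_dump_descriptor(target_phys_addr_t table_base)
{
    uint32_t flags = ldl_phys(table_base);           /* 32-bit field at +0  */
    uint64_t base  = ldq_phys(table_base + 8);       /* 64-bit field at +8  */
    uint32_t tag   = lduw_phys(table_base + 16);     /* 16-bit field at +16 */

    printf("flags=%08x base=%" PRIx64 " tag=%04x\n", flags, base, tag);
}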
3369 aab33094 bellard
3370 8df1cd07 bellard
/* warning: addr must be 32-bit aligned. The ram page is not marked as dirty
3371 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
3372 8df1cd07 bellard
   bits are used to track modified PTEs */
3373 8df1cd07 bellard
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3374 8df1cd07 bellard
{
3375 8df1cd07 bellard
    int io_index;
3376 8df1cd07 bellard
    uint8_t *ptr;
3377 8df1cd07 bellard
    unsigned long pd;
3378 8df1cd07 bellard
    PhysPageDesc *p;
3379 8df1cd07 bellard
3380 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3381 8df1cd07 bellard
    if (!p) {
3382 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3383 8df1cd07 bellard
    } else {
3384 8df1cd07 bellard
        pd = p->phys_offset;
3385 8df1cd07 bellard
    }
3386 3b46e624 ths
3387 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3388 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3389 8da3ff18 pbrook
        if (p)
3390 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3391 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3392 8df1cd07 bellard
    } else {
3393 74576198 aliguori
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3394 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3395 8df1cd07 bellard
        stl_p(ptr, val);
3396 74576198 aliguori
3397 74576198 aliguori
        if (unlikely(in_migration)) {
3398 74576198 aliguori
            if (!cpu_physical_memory_is_dirty(addr1)) {
3399 74576198 aliguori
                /* invalidate code */
3400 74576198 aliguori
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3401 74576198 aliguori
                /* set dirty bit */
3402 74576198 aliguori
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3403 74576198 aliguori
                    (0xff & ~CODE_DIRTY_FLAG);
3404 74576198 aliguori
            }
3405 74576198 aliguori
        }
3406 8df1cd07 bellard
    }
3407 8df1cd07 bellard
}
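/* [Editorial sketch, not part of exec.c] stl_phys_notdirty() writes RAM
 * without setting the dirty bits or invalidating translated code, which is
 * what a target MMU helper wants when it flips accessed/dirty bits inside a
 * guest page-table entry: the update must not make the page holding the PTE
 * look as if the guest modified it.  Hypothetical helper; the PTE bit layout
 * is illustrative only.
 */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);      /* read the current PTE */
    if (!(pte & 0x20)) {                    /* e.g. an "accessed" bit */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}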
3408 8df1cd07 bellard
3409 bc98a7ef j_mayer
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3410 bc98a7ef j_mayer
{
3411 bc98a7ef j_mayer
    int io_index;
3412 bc98a7ef j_mayer
    uint8_t *ptr;
3413 bc98a7ef j_mayer
    unsigned long pd;
3414 bc98a7ef j_mayer
    PhysPageDesc *p;
3415 bc98a7ef j_mayer
3416 bc98a7ef j_mayer
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3417 bc98a7ef j_mayer
    if (!p) {
3418 bc98a7ef j_mayer
        pd = IO_MEM_UNASSIGNED;
3419 bc98a7ef j_mayer
    } else {
3420 bc98a7ef j_mayer
        pd = p->phys_offset;
3421 bc98a7ef j_mayer
    }
3422 3b46e624 ths
3423 bc98a7ef j_mayer
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3424 bc98a7ef j_mayer
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3425 8da3ff18 pbrook
        if (p)
3426 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3427 bc98a7ef j_mayer
#ifdef TARGET_WORDS_BIGENDIAN
3428 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3429 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3430 bc98a7ef j_mayer
#else
3431 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3432 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3433 bc98a7ef j_mayer
#endif
3434 bc98a7ef j_mayer
    } else {
3435 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3436 bc98a7ef j_mayer
            (addr & ~TARGET_PAGE_MASK);
3437 bc98a7ef j_mayer
        stq_p(ptr, val);
3438 bc98a7ef j_mayer
    }
3439 bc98a7ef j_mayer
}
3440 bc98a7ef j_mayer
3441 8df1cd07 bellard
/* warning: addr must be 32-bit aligned */
3442 8df1cd07 bellard
void stl_phys(target_phys_addr_t addr, uint32_t val)
3443 8df1cd07 bellard
{
3444 8df1cd07 bellard
    int io_index;
3445 8df1cd07 bellard
    uint8_t *ptr;
3446 8df1cd07 bellard
    unsigned long pd;
3447 8df1cd07 bellard
    PhysPageDesc *p;
3448 8df1cd07 bellard
3449 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3450 8df1cd07 bellard
    if (!p) {
3451 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3452 8df1cd07 bellard
    } else {
3453 8df1cd07 bellard
        pd = p->phys_offset;
3454 8df1cd07 bellard
    }
3455 3b46e624 ths
3456 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3457 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3458 8da3ff18 pbrook
        if (p)
3459 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3460 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3461 8df1cd07 bellard
    } else {
3462 8df1cd07 bellard
        unsigned long addr1;
3463 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3464 8df1cd07 bellard
        /* RAM case */
3465 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3466 8df1cd07 bellard
        stl_p(ptr, val);
3467 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
3468 3a7d929e bellard
            /* invalidate code */
3469 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3470 3a7d929e bellard
            /* set dirty bit */
3471 f23db169 bellard
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3472 f23db169 bellard
                (0xff & ~CODE_DIRTY_FLAG);
3473 3a7d929e bellard
        }
3474 8df1cd07 bellard
    }
3475 8df1cd07 bellard
}
3476 8df1cd07 bellard
3477 aab33094 bellard
/* XXX: optimize */
3478 aab33094 bellard
void stb_phys(target_phys_addr_t addr, uint32_t val)
3479 aab33094 bellard
{
3480 aab33094 bellard
    uint8_t v = val;
3481 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3482 aab33094 bellard
}
3483 aab33094 bellard
3484 aab33094 bellard
/* XXX: optimize */
3485 aab33094 bellard
void stw_phys(target_phys_addr_t addr, uint32_t val)
3486 aab33094 bellard
{
3487 aab33094 bellard
    uint16_t v = tswap16(val);
3488 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3489 aab33094 bellard
}
3490 aab33094 bellard
3491 aab33094 bellard
/* XXX: optimize */
3492 aab33094 bellard
void stq_phys(target_phys_addr_t addr, uint64_t val)
3493 aab33094 bellard
{
3494 aab33094 bellard
    val = tswap64(val);
3495 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3496 aab33094 bellard
}
3497 aab33094 bellard
3498 13eb76e0 bellard
#endif
3499 13eb76e0 bellard
3500 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
3501 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3502 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
3503 13eb76e0 bellard
{
3504 13eb76e0 bellard
    int l;
3505 9b3c35e0 j_mayer
    target_phys_addr_t phys_addr;
3506 9b3c35e0 j_mayer
    target_ulong page;
3507 13eb76e0 bellard
3508 13eb76e0 bellard
    while (len > 0) {
3509 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3510 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
3511 13eb76e0 bellard
        /* if no physical page mapped, return an error */
3512 13eb76e0 bellard
        if (phys_addr == -1)
3513 13eb76e0 bellard
            return -1;
3514 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3515 13eb76e0 bellard
        if (l > len)
3516 13eb76e0 bellard
            l = len;
3517 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
3518 5e2972fd aliguori
#if !defined(CONFIG_USER_ONLY)
3519 5e2972fd aliguori
        if (is_write)
3520 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
3521 5e2972fd aliguori
        else
3522 5e2972fd aliguori
#endif
3523 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3524 13eb76e0 bellard
        len -= l;
3525 13eb76e0 bellard
        buf += l;
3526 13eb76e0 bellard
        addr += l;
3527 13eb76e0 bellard
    }
3528 13eb76e0 bellard
    return 0;
3529 13eb76e0 bellard
}
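/* [Editorial sketch, not part of exec.c] cpu_memory_rw_debug() is the
 * accessor for debugger-style access: it translates the guest-virtual address
 * page by page with cpu_get_phys_page_debug() and, in system mode, can even
 * write into ROM.  A gdb-stub style memory read reduces to the call below
 * (hypothetical wrapper).
 */
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* read */);
}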
3530 13eb76e0 bellard
3531 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
3532 2e70f6ef pbrook
   must be at the end of the TB */
3533 2e70f6ef pbrook
void cpu_io_recompile(CPUState *env, void *retaddr)
3534 2e70f6ef pbrook
{
3535 2e70f6ef pbrook
    TranslationBlock *tb;
3536 2e70f6ef pbrook
    uint32_t n, cflags;
3537 2e70f6ef pbrook
    target_ulong pc, cs_base;
3538 2e70f6ef pbrook
    uint64_t flags;
3539 2e70f6ef pbrook
3540 2e70f6ef pbrook
    tb = tb_find_pc((unsigned long)retaddr);
3541 2e70f6ef pbrook
    if (!tb) {
3542 2e70f6ef pbrook
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
3543 2e70f6ef pbrook
                  retaddr);
3544 2e70f6ef pbrook
    }
3545 2e70f6ef pbrook
    n = env->icount_decr.u16.low + tb->icount;
3546 2e70f6ef pbrook
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3547 2e70f6ef pbrook
    /* Calculate how many instructions had been executed before the fault
3548 bf20dc07 ths
       occurred.  */
3549 2e70f6ef pbrook
    n = n - env->icount_decr.u16.low;
3550 2e70f6ef pbrook
    /* Generate a new TB ending on the I/O insn.  */
3551 2e70f6ef pbrook
    n++;
3552 2e70f6ef pbrook
    /* On MIPS and SH, delay slot instructions can only be restarted if
3553 2e70f6ef pbrook
       they were already the first instruction in the TB.  If this is not
3554 bf20dc07 ths
       the first instruction in a TB then re-execute the preceding
3555 2e70f6ef pbrook
       branch.  */
3556 2e70f6ef pbrook
#if defined(TARGET_MIPS)
3557 2e70f6ef pbrook
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3558 2e70f6ef pbrook
        env->active_tc.PC -= 4;
3559 2e70f6ef pbrook
        env->icount_decr.u16.low++;
3560 2e70f6ef pbrook
        env->hflags &= ~MIPS_HFLAG_BMASK;
3561 2e70f6ef pbrook
    }
3562 2e70f6ef pbrook
#elif defined(TARGET_SH4)
3563 2e70f6ef pbrook
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3564 2e70f6ef pbrook
            && n > 1) {
3565 2e70f6ef pbrook
        env->pc -= 2;
3566 2e70f6ef pbrook
        env->icount_decr.u16.low++;
3567 2e70f6ef pbrook
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3568 2e70f6ef pbrook
    }
3569 2e70f6ef pbrook
#endif
3570 2e70f6ef pbrook
    /* This should never happen.  */
3571 2e70f6ef pbrook
    if (n > CF_COUNT_MASK)
3572 2e70f6ef pbrook
        cpu_abort(env, "TB too big during recompile");
3573 2e70f6ef pbrook
3574 2e70f6ef pbrook
    cflags = n | CF_LAST_IO;
3575 2e70f6ef pbrook
    pc = tb->pc;
3576 2e70f6ef pbrook
    cs_base = tb->cs_base;
3577 2e70f6ef pbrook
    flags = tb->flags;
3578 2e70f6ef pbrook
    tb_phys_invalidate(tb, -1);
3579 2e70f6ef pbrook
    /* FIXME: In theory this could raise an exception.  In practice
3580 2e70f6ef pbrook
       we have already translated the block once so it's probably ok.  */
3581 2e70f6ef pbrook
    tb_gen_code(env, pc, cs_base, flags, cflags);
3582 bf20dc07 ths
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3583 2e70f6ef pbrook
       the first in the TB) then we end up generating a whole new TB and
3584 2e70f6ef pbrook
       repeating the fault, which is horribly inefficient.
3585 2e70f6ef pbrook
       Better would be to execute just this insn uncached, or generate a
3586 2e70f6ef pbrook
       second new TB.  */
3587 2e70f6ef pbrook
    cpu_resume_from_signal(env, NULL);
3588 2e70f6ef pbrook
}
3589 2e70f6ef pbrook
3590 e3db7226 bellard
void dump_exec_info(FILE *f,
3591 e3db7226 bellard
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3592 e3db7226 bellard
{
3593 e3db7226 bellard
    int i, target_code_size, max_target_code_size;
3594 e3db7226 bellard
    int direct_jmp_count, direct_jmp2_count, cross_page;
3595 e3db7226 bellard
    TranslationBlock *tb;
3596 3b46e624 ths
3597 e3db7226 bellard
    target_code_size = 0;
3598 e3db7226 bellard
    max_target_code_size = 0;
3599 e3db7226 bellard
    cross_page = 0;
3600 e3db7226 bellard
    direct_jmp_count = 0;
3601 e3db7226 bellard
    direct_jmp2_count = 0;
3602 e3db7226 bellard
    for(i = 0; i < nb_tbs; i++) {
3603 e3db7226 bellard
        tb = &tbs[i];
3604 e3db7226 bellard
        target_code_size += tb->size;
3605 e3db7226 bellard
        if (tb->size > max_target_code_size)
3606 e3db7226 bellard
            max_target_code_size = tb->size;
3607 e3db7226 bellard
        if (tb->page_addr[1] != -1)
3608 e3db7226 bellard
            cross_page++;
3609 e3db7226 bellard
        if (tb->tb_next_offset[0] != 0xffff) {
3610 e3db7226 bellard
            direct_jmp_count++;
3611 e3db7226 bellard
            if (tb->tb_next_offset[1] != 0xffff) {
3612 e3db7226 bellard
                direct_jmp2_count++;
3613 e3db7226 bellard
            }
3614 e3db7226 bellard
        }
3615 e3db7226 bellard
    }
3616 e3db7226 bellard
    /* XXX: avoid using doubles ? */
3617 57fec1fe bellard
    cpu_fprintf(f, "Translation buffer state:\n");
3618 26a5f13b bellard
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
3619 26a5f13b bellard
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3620 26a5f13b bellard
    cpu_fprintf(f, "TB count            %d/%d\n", 
3621 26a5f13b bellard
                nb_tbs, code_gen_max_blocks);
3622 5fafdf24 ths
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3623 e3db7226 bellard
                nb_tbs ? target_code_size / nb_tbs : 0,
3624 e3db7226 bellard
                max_target_code_size);
3625 5fafdf24 ths
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3626 e3db7226 bellard
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3627 e3db7226 bellard
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3628 5fafdf24 ths
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3629 5fafdf24 ths
            cross_page,
3630 e3db7226 bellard
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3631 e3db7226 bellard
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3632 5fafdf24 ths
                direct_jmp_count,
3633 e3db7226 bellard
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3634 e3db7226 bellard
                direct_jmp2_count,
3635 e3db7226 bellard
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3636 57fec1fe bellard
    cpu_fprintf(f, "\nStatistics:\n");
3637 e3db7226 bellard
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3638 e3db7226 bellard
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3639 e3db7226 bellard
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3640 b67d9a52 bellard
    tcg_dump_info(f, cpu_fprintf);
3641 e3db7226 bellard
}
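/* [Editorial sketch, not part of exec.c] dump_exec_info() only formats the
 * statistics; the caller supplies the stream and an fprintf-compatible
 * callback, so dumping translation-buffer statistics to stderr is simply:
 */
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}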
3642 e3db7226 bellard
3643 5fafdf24 ths
#if !defined(CONFIG_USER_ONLY)
3644 61382a50 bellard
3645 61382a50 bellard
#define MMUSUFFIX _cmmu
3646 61382a50 bellard
#define GETPC() NULL
3647 61382a50 bellard
#define env cpu_single_env
3648 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
3649 61382a50 bellard
3650 61382a50 bellard
#define SHIFT 0
3651 61382a50 bellard
#include "softmmu_template.h"
3652 61382a50 bellard
3653 61382a50 bellard
#define SHIFT 1
3654 61382a50 bellard
#include "softmmu_template.h"
3655 61382a50 bellard
3656 61382a50 bellard
#define SHIFT 2
3657 61382a50 bellard
#include "softmmu_template.h"
3658 61382a50 bellard
3659 61382a50 bellard
#define SHIFT 3
3660 61382a50 bellard
#include "softmmu_template.h"
3661 61382a50 bellard
3662 61382a50 bellard
#undef env
3663 61382a50 bellard
3664 61382a50 bellard
#endif