root / exec.c @ 719f66a7

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 54936004 bellard
 * License along with this library; if not, write to the Free Software
18 fad6cb1a aurel32
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19 54936004 bellard
 */
20 67b915a5 bellard
#include "config.h"
21 d5a8f07c bellard
#ifdef _WIN32
22 d5a8f07c bellard
#include <windows.h>
23 d5a8f07c bellard
#else
24 a98d49b1 bellard
#include <sys/types.h>
25 d5a8f07c bellard
#include <sys/mman.h>
26 d5a8f07c bellard
#endif
27 54936004 bellard
#include <stdlib.h>
28 54936004 bellard
#include <stdio.h>
29 54936004 bellard
#include <stdarg.h>
30 54936004 bellard
#include <string.h>
31 54936004 bellard
#include <errno.h>
32 54936004 bellard
#include <unistd.h>
33 54936004 bellard
#include <inttypes.h>
34 54936004 bellard
35 6180a181 bellard
#include "cpu.h"
36 6180a181 bellard
#include "exec-all.h"
37 ca10f867 aurel32
#include "qemu-common.h"
38 b67d9a52 bellard
#include "tcg.h"
39 b3c7724c pbrook
#include "hw/hw.h"
40 74576198 aliguori
#include "osdep.h"
41 7ba1e619 aliguori
#include "kvm.h"
42 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
43 53a5960a pbrook
#include <qemu.h>
44 53a5960a pbrook
#endif
45 54936004 bellard
46 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
47 66e85a21 bellard
//#define DEBUG_FLUSH
48 9fa3e853 bellard
//#define DEBUG_TLB
49 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
50 fd6ce8f6 bellard
51 fd6ce8f6 bellard
/* make various TB consistency checks */
52 5fafdf24 ths
//#define DEBUG_TB_CHECK
53 5fafdf24 ths
//#define DEBUG_TLB_CHECK
54 fd6ce8f6 bellard
55 1196be37 ths
//#define DEBUG_IOPORT
56 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
57 1196be37 ths
58 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
59 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
60 99773bd4 pbrook
#undef DEBUG_TB_CHECK
61 99773bd4 pbrook
#endif
62 99773bd4 pbrook
63 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
64 9fa3e853 bellard
65 108c49b8 bellard
#if defined(TARGET_SPARC64)
66 108c49b8 bellard
#define TARGET_PHYS_ADDR_SPACE_BITS 41
67 5dcb6b91 blueswir1
#elif defined(TARGET_SPARC)
68 5dcb6b91 blueswir1
#define TARGET_PHYS_ADDR_SPACE_BITS 36
69 bedb69ea j_mayer
#elif defined(TARGET_ALPHA)
70 bedb69ea j_mayer
#define TARGET_PHYS_ADDR_SPACE_BITS 42
71 bedb69ea j_mayer
#define TARGET_VIRT_ADDR_SPACE_BITS 42
72 108c49b8 bellard
#elif defined(TARGET_PPC64)
73 108c49b8 bellard
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74 640f42e4 blueswir1
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
75 00f82b8a aurel32
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76 640f42e4 blueswir1
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
77 00f82b8a aurel32
#define TARGET_PHYS_ADDR_SPACE_BITS 36
78 108c49b8 bellard
#else
79 108c49b8 bellard
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 108c49b8 bellard
#define TARGET_PHYS_ADDR_SPACE_BITS 32
81 108c49b8 bellard
#endif
82 108c49b8 bellard
83 bdaf78e0 blueswir1
static TranslationBlock *tbs;
84 26a5f13b bellard
int code_gen_max_blocks;
85 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86 bdaf78e0 blueswir1
static int nb_tbs;
87 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
88 eb51d102 bellard
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 fd6ce8f6 bellard
90 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
91 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
92 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
93 d03d860b blueswir1
 section close to code segment. */
94 d03d860b blueswir1
#define code_gen_section                                \
95 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
96 d03d860b blueswir1
    __attribute__((aligned (32)))
97 d03d860b blueswir1
#else
98 d03d860b blueswir1
#define code_gen_section                                \
99 d03d860b blueswir1
    __attribute__((aligned (32)))
100 d03d860b blueswir1
#endif
101 d03d860b blueswir1
102 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
103 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
104 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
105 26a5f13b bellard
/* threshold to flush the translated code buffer */
106 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
107 fd6ce8f6 bellard
uint8_t *code_gen_ptr;
108 fd6ce8f6 bellard
109 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
110 9fa3e853 bellard
int phys_ram_fd;
111 1ccde1cb bellard
uint8_t *phys_ram_dirty;
112 74576198 aliguori
static int in_migration;
113 94a6b54f pbrook
114 94a6b54f pbrook
typedef struct RAMBlock {
115 94a6b54f pbrook
    uint8_t *host;
116 94a6b54f pbrook
    ram_addr_t offset;
117 94a6b54f pbrook
    ram_addr_t length;
118 94a6b54f pbrook
    struct RAMBlock *next;
119 94a6b54f pbrook
} RAMBlock;
120 94a6b54f pbrook
121 94a6b54f pbrook
static RAMBlock *ram_blocks;
122 94a6b54f pbrook
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
123 94a6b54f pbrook
   then we can no longer assume contiguous ram offsets, and external uses
124 94a6b54f pbrook
   of this variable will break.  */
125 94a6b54f pbrook
ram_addr_t last_ram_offset;
126 e2eef170 pbrook
#endif
127 9fa3e853 bellard
128 6a00d601 bellard
CPUState *first_cpu;
129 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
130 6a00d601 bellard
   cpu_exec() */
131 5fafdf24 ths
CPUState *cpu_single_env;
132 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
133 bf20dc07 ths
   1 = Precise instruction counting.
134 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
135 2e70f6ef pbrook
int use_icount = 0;
136 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
137 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
138 2e70f6ef pbrook
int64_t qemu_icount;
139 6a00d601 bellard
140 54936004 bellard
typedef struct PageDesc {
141 92e873b9 bellard
    /* list of TBs intersecting this ram page */
142 fd6ce8f6 bellard
    TranslationBlock *first_tb;
143 9fa3e853 bellard
    /* in order to optimize self modifying code, we count the number
144 9fa3e853 bellard
       of lookups we do to a given page to use a bitmap */
145 9fa3e853 bellard
    unsigned int code_write_count;
146 9fa3e853 bellard
    uint8_t *code_bitmap;
147 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
148 9fa3e853 bellard
    unsigned long flags;
149 9fa3e853 bellard
#endif
150 54936004 bellard
} PageDesc;
151 54936004 bellard
152 92e873b9 bellard
typedef struct PhysPageDesc {
153 0f459d16 pbrook
    /* offset in host memory of the page + io_index in the low bits */
154 00f82b8a aurel32
    ram_addr_t phys_offset;
155 8da3ff18 pbrook
    ram_addr_t region_offset;
156 92e873b9 bellard
} PhysPageDesc;
157 92e873b9 bellard
158 54936004 bellard
#define L2_BITS 10
159 bedb69ea j_mayer
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
160 bedb69ea j_mayer
/* XXX: this is a temporary hack for alpha target.
161 bedb69ea j_mayer
 *      In the future, this is to be replaced by a multi-level table
162 bedb69ea j_mayer
 *      to actually be able to handle the complete 64 bits address space.
163 bedb69ea j_mayer
 */
164 bedb69ea j_mayer
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
165 bedb69ea j_mayer
#else
166 03875444 aurel32
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
167 bedb69ea j_mayer
#endif
168 54936004 bellard
169 54936004 bellard
#define L1_SIZE (1 << L1_BITS)
170 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
171 54936004 bellard
172 83fb7adf bellard
unsigned long qemu_real_host_page_size;
173 83fb7adf bellard
unsigned long qemu_host_page_bits;
174 83fb7adf bellard
unsigned long qemu_host_page_size;
175 83fb7adf bellard
unsigned long qemu_host_page_mask;
176 54936004 bellard
177 92e873b9 bellard
/* XXX: for system emulation, it could just be an array */
178 54936004 bellard
static PageDesc *l1_map[L1_SIZE];
179 bdaf78e0 blueswir1
static PhysPageDesc **l1_phys_map;
180 54936004 bellard
181 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
182 e2eef170 pbrook
static void io_mem_init(void);
183 e2eef170 pbrook
184 33417e70 bellard
/* io memory support */
185 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
186 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
187 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
188 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
189 6658ffb8 pbrook
static int io_mem_watch;
190 6658ffb8 pbrook
#endif
191 33417e70 bellard
192 34865134 bellard
/* log support */
193 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
194 34865134 bellard
FILE *logfile;
195 34865134 bellard
int loglevel;
196 e735b91c pbrook
static int log_append = 0;
197 34865134 bellard
198 e3db7226 bellard
/* statistics */
199 e3db7226 bellard
static int tlb_flush_count;
200 e3db7226 bellard
static int tb_flush_count;
201 e3db7226 bellard
static int tb_phys_invalidate_count;
202 e3db7226 bellard
203 db7b5426 blueswir1
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
204 db7b5426 blueswir1
typedef struct subpage_t {
205 db7b5426 blueswir1
    target_phys_addr_t base;
206 3ee89922 blueswir1
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
207 3ee89922 blueswir1
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
208 3ee89922 blueswir1
    void *opaque[TARGET_PAGE_SIZE][2][4];
209 8da3ff18 pbrook
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
210 db7b5426 blueswir1
} subpage_t;
211 db7b5426 blueswir1
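/* Make the given host memory range executable: VirtualProtect() on
   Windows, mprotect() (rounded out to host page boundaries) elsewhere. */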
212 7cb69cae bellard
#ifdef _WIN32
213 7cb69cae bellard
static void map_exec(void *addr, long size)
214 7cb69cae bellard
{
215 7cb69cae bellard
    DWORD old_protect;
216 7cb69cae bellard
    VirtualProtect(addr, size,
217 7cb69cae bellard
                   PAGE_EXECUTE_READWRITE, &old_protect);
218 7cb69cae bellard
    
219 7cb69cae bellard
}
220 7cb69cae bellard
#else
221 7cb69cae bellard
static void map_exec(void *addr, long size)
222 7cb69cae bellard
{
223 4369415f bellard
    unsigned long start, end, page_size;
224 7cb69cae bellard
    
225 4369415f bellard
    page_size = getpagesize();
226 7cb69cae bellard
    start = (unsigned long)addr;
227 4369415f bellard
    start &= ~(page_size - 1);
228 7cb69cae bellard
    
229 7cb69cae bellard
    end = (unsigned long)addr + size;
230 4369415f bellard
    end += page_size - 1;
231 4369415f bellard
    end &= ~(page_size - 1);
232 7cb69cae bellard
    
233 7cb69cae bellard
    mprotect((void *)start, end - start,
234 7cb69cae bellard
             PROT_READ | PROT_WRITE | PROT_EXEC);
235 7cb69cae bellard
}
236 7cb69cae bellard
#endif
237 7cb69cae bellard
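/* Determine the host/target page size parameters, allocate l1_phys_map
   and, for user-mode emulation, mark the address ranges already used by
   the host (from /proc/self/maps) as reserved. */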
238 b346ff46 bellard
static void page_init(void)
239 54936004 bellard
{
240 83fb7adf bellard
    /* NOTE: we can always suppose that qemu_host_page_size >=
241 54936004 bellard
       TARGET_PAGE_SIZE */
242 c2b48b69 aliguori
#ifdef _WIN32
243 c2b48b69 aliguori
    {
244 c2b48b69 aliguori
        SYSTEM_INFO system_info;
245 c2b48b69 aliguori
246 c2b48b69 aliguori
        GetSystemInfo(&system_info);
247 c2b48b69 aliguori
        qemu_real_host_page_size = system_info.dwPageSize;
248 c2b48b69 aliguori
    }
249 c2b48b69 aliguori
#else
250 c2b48b69 aliguori
    qemu_real_host_page_size = getpagesize();
251 c2b48b69 aliguori
#endif
252 83fb7adf bellard
    if (qemu_host_page_size == 0)
253 83fb7adf bellard
        qemu_host_page_size = qemu_real_host_page_size;
254 83fb7adf bellard
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
255 83fb7adf bellard
        qemu_host_page_size = TARGET_PAGE_SIZE;
256 83fb7adf bellard
    qemu_host_page_bits = 0;
257 83fb7adf bellard
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
258 83fb7adf bellard
        qemu_host_page_bits++;
259 83fb7adf bellard
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
260 108c49b8 bellard
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261 108c49b8 bellard
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
262 50a9569b balrog
263 50a9569b balrog
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
264 50a9569b balrog
    {
265 50a9569b balrog
        long long startaddr, endaddr;
266 50a9569b balrog
        FILE *f;
267 50a9569b balrog
        int n;
268 50a9569b balrog
269 c8a706fe pbrook
        mmap_lock();
270 0776590d pbrook
        last_brk = (unsigned long)sbrk(0);
271 50a9569b balrog
        f = fopen("/proc/self/maps", "r");
272 50a9569b balrog
        if (f) {
273 50a9569b balrog
            do {
274 50a9569b balrog
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
275 50a9569b balrog
                if (n == 2) {
276 e0b8d65a blueswir1
                    startaddr = MIN(startaddr,
277 e0b8d65a blueswir1
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278 e0b8d65a blueswir1
                    endaddr = MIN(endaddr,
279 e0b8d65a blueswir1
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280 b5fc909e pbrook
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
281 50a9569b balrog
                                   TARGET_PAGE_ALIGN(endaddr),
282 50a9569b balrog
                                   PAGE_RESERVED); 
283 50a9569b balrog
                }
284 50a9569b balrog
            } while (!feof(f));
285 50a9569b balrog
            fclose(f);
286 50a9569b balrog
        }
287 c8a706fe pbrook
        mmap_unlock();
288 50a9569b balrog
    }
289 50a9569b balrog
#endif
290 54936004 bellard
}
291 54936004 bellard
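/* Return the first-level l1_map slot for the given target page index,
   or NULL if the index is outside the handled address range. */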
292 434929bf aliguori
static inline PageDesc **page_l1_map(target_ulong index)
293 54936004 bellard
{
294 17e2377a pbrook
#if TARGET_LONG_BITS > 32
295 17e2377a pbrook
    /* Host memory outside guest VM.  For 32-bit targets we have already
296 17e2377a pbrook
       excluded high addresses.  */
297 d8173e0f ths
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
298 17e2377a pbrook
        return NULL;
299 17e2377a pbrook
#endif
300 434929bf aliguori
    return &l1_map[index >> L2_BITS];
301 434929bf aliguori
}
302 434929bf aliguori
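/* Find the PageDesc for a target page index, allocating the
   second-level table on demand. */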
303 434929bf aliguori
static inline PageDesc *page_find_alloc(target_ulong index)
304 434929bf aliguori
{
305 434929bf aliguori
    PageDesc **lp, *p;
306 434929bf aliguori
    lp = page_l1_map(index);
307 434929bf aliguori
    if (!lp)
308 434929bf aliguori
        return NULL;
309 434929bf aliguori
310 54936004 bellard
    p = *lp;
311 54936004 bellard
    if (!p) {
312 54936004 bellard
        /* allocate if not found */
313 17e2377a pbrook
#if defined(CONFIG_USER_ONLY)
314 17e2377a pbrook
        size_t len = sizeof(PageDesc) * L2_SIZE;
315 17e2377a pbrook
        /* Don't use qemu_malloc because it may recurse.  */
316 17e2377a pbrook
        p = mmap(0, len, PROT_READ | PROT_WRITE,
317 17e2377a pbrook
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
318 54936004 bellard
        *lp = p;
319 fb1c2cd7 aurel32
        if (h2g_valid(p)) {
320 fb1c2cd7 aurel32
            unsigned long addr = h2g(p);
321 17e2377a pbrook
            page_set_flags(addr & TARGET_PAGE_MASK,
322 17e2377a pbrook
                           TARGET_PAGE_ALIGN(addr + len),
323 17e2377a pbrook
                           PAGE_RESERVED); 
324 17e2377a pbrook
        }
325 17e2377a pbrook
#else
326 17e2377a pbrook
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
327 17e2377a pbrook
        *lp = p;
328 17e2377a pbrook
#endif
329 54936004 bellard
    }
330 54936004 bellard
    return p + (index & (L2_SIZE - 1));
331 54936004 bellard
}
332 54936004 bellard
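/* Same lookup as page_find_alloc(), but never allocates; returns NULL
   if the page has no descriptor yet. */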
333 00f82b8a aurel32
static inline PageDesc *page_find(target_ulong index)
334 54936004 bellard
{
335 434929bf aliguori
    PageDesc **lp, *p;
336 434929bf aliguori
    lp = page_l1_map(index);
337 434929bf aliguori
    if (!lp)
338 434929bf aliguori
        return NULL;
339 54936004 bellard
340 434929bf aliguori
    p = *lp;
341 54936004 bellard
    if (!p)
342 54936004 bellard
        return 0;
343 fd6ce8f6 bellard
    return p + (index & (L2_SIZE - 1));
344 fd6ce8f6 bellard
}
345 fd6ce8f6 bellard
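/* Find the PhysPageDesc for a physical page index in l1_phys_map (one
   or two levels depending on TARGET_PHYS_ADDR_SPACE_BITS).  If 'alloc'
   is set, missing tables are allocated and new entries are initialized
   to IO_MEM_UNASSIGNED. */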
346 108c49b8 bellard
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
347 92e873b9 bellard
{
348 108c49b8 bellard
    void **lp, **p;
349 e3f4e2a4 pbrook
    PhysPageDesc *pd;
350 92e873b9 bellard
351 108c49b8 bellard
    p = (void **)l1_phys_map;
352 108c49b8 bellard
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
353 108c49b8 bellard
354 108c49b8 bellard
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
355 108c49b8 bellard
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
356 108c49b8 bellard
#endif
357 108c49b8 bellard
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
358 92e873b9 bellard
    p = *lp;
359 92e873b9 bellard
    if (!p) {
360 92e873b9 bellard
        /* allocate if not found */
361 108c49b8 bellard
        if (!alloc)
362 108c49b8 bellard
            return NULL;
363 108c49b8 bellard
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
364 108c49b8 bellard
        memset(p, 0, sizeof(void *) * L1_SIZE);
365 108c49b8 bellard
        *lp = p;
366 108c49b8 bellard
    }
367 108c49b8 bellard
#endif
368 108c49b8 bellard
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
369 e3f4e2a4 pbrook
    pd = *lp;
370 e3f4e2a4 pbrook
    if (!pd) {
371 e3f4e2a4 pbrook
        int i;
372 108c49b8 bellard
        /* allocate if not found */
373 108c49b8 bellard
        if (!alloc)
374 108c49b8 bellard
            return NULL;
375 e3f4e2a4 pbrook
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
376 e3f4e2a4 pbrook
        *lp = pd;
377 67c4d23c pbrook
        for (i = 0; i < L2_SIZE; i++) {
378 e3f4e2a4 pbrook
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
379 67c4d23c pbrook
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
380 67c4d23c pbrook
        }
381 92e873b9 bellard
    }
382 e3f4e2a4 pbrook
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
383 92e873b9 bellard
}
384 92e873b9 bellard
385 108c49b8 bellard
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
386 92e873b9 bellard
{
387 108c49b8 bellard
    return phys_page_find_alloc(index, 0);
388 92e873b9 bellard
}
389 92e873b9 bellard
390 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
391 6a00d601 bellard
static void tlb_protect_code(ram_addr_t ram_addr);
392 5fafdf24 ths
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
393 3a7d929e bellard
                                    target_ulong vaddr);
394 c8a706fe pbrook
#define mmap_lock() do { } while(0)
395 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
396 9fa3e853 bellard
#endif
397 fd6ce8f6 bellard
398 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399 4369415f bellard
400 4369415f bellard
#if defined(CONFIG_USER_ONLY)
401 4369415f bellard
/* Currently it is not recommended to allocate big chunks of data in
402 4369415f bellard
   user mode. It will change when a dedicated libc is used */
403 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
404 4369415f bellard
#endif
405 4369415f bellard
406 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
407 4369415f bellard
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
408 4369415f bellard
#endif
409 4369415f bellard
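/* Allocate the buffer that will hold the generated host code: either
   the static buffer, or an mmap()/qemu_malloc() allocation subject to
   per-host placement constraints.  Also makes the prologue executable
   and sizes the TB array. */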
410 8fcd3692 blueswir1
static void code_gen_alloc(unsigned long tb_size)
411 26a5f13b bellard
{
412 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
413 4369415f bellard
    code_gen_buffer = static_code_gen_buffer;
414 4369415f bellard
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415 4369415f bellard
    map_exec(code_gen_buffer, code_gen_buffer_size);
416 4369415f bellard
#else
417 26a5f13b bellard
    code_gen_buffer_size = tb_size;
418 26a5f13b bellard
    if (code_gen_buffer_size == 0) {
419 4369415f bellard
#if defined(CONFIG_USER_ONLY)
420 4369415f bellard
        /* in user mode, phys_ram_size is not meaningful */
421 4369415f bellard
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
422 4369415f bellard
#else
423 26a5f13b bellard
        /* XXX: needs adjustments */
424 94a6b54f pbrook
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
425 4369415f bellard
#endif
426 26a5f13b bellard
    }
427 26a5f13b bellard
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
428 26a5f13b bellard
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
429 26a5f13b bellard
    /* The code gen buffer location may have constraints depending on
430 26a5f13b bellard
       the host cpu and OS */
431 26a5f13b bellard
#if defined(__linux__) 
432 26a5f13b bellard
    {
433 26a5f13b bellard
        int flags;
434 141ac468 blueswir1
        void *start = NULL;
435 141ac468 blueswir1
436 26a5f13b bellard
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
437 26a5f13b bellard
#if defined(__x86_64__)
438 26a5f13b bellard
        flags |= MAP_32BIT;
439 26a5f13b bellard
        /* Cannot map more than that */
440 26a5f13b bellard
        if (code_gen_buffer_size > (800 * 1024 * 1024))
441 26a5f13b bellard
            code_gen_buffer_size = (800 * 1024 * 1024);
442 141ac468 blueswir1
#elif defined(__sparc_v9__)
443 141ac468 blueswir1
        // Map the buffer below 2G, so we can use direct calls and branches
444 141ac468 blueswir1
        flags |= MAP_FIXED;
445 141ac468 blueswir1
        start = (void *) 0x60000000UL;
446 141ac468 blueswir1
        if (code_gen_buffer_size > (512 * 1024 * 1024))
447 141ac468 blueswir1
            code_gen_buffer_size = (512 * 1024 * 1024);
448 1cb0661e balrog
#elif defined(__arm__)
449 63d41246 balrog
        /* Map the buffer below 32M, so we can use direct calls and branches */
450 1cb0661e balrog
        flags |= MAP_FIXED;
451 1cb0661e balrog
        start = (void *) 0x01000000UL;
452 1cb0661e balrog
        if (code_gen_buffer_size > 16 * 1024 * 1024)
453 1cb0661e balrog
            code_gen_buffer_size = 16 * 1024 * 1024;
454 26a5f13b bellard
#endif
455 141ac468 blueswir1
        code_gen_buffer = mmap(start, code_gen_buffer_size,
456 141ac468 blueswir1
                               PROT_WRITE | PROT_READ | PROT_EXEC,
457 26a5f13b bellard
                               flags, -1, 0);
458 26a5f13b bellard
        if (code_gen_buffer == MAP_FAILED) {
459 26a5f13b bellard
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
460 26a5f13b bellard
            exit(1);
461 26a5f13b bellard
        }
462 26a5f13b bellard
    }
463 c5e97233 blueswir1
#elif defined(__FreeBSD__) || defined(__DragonFly__)
464 06e67a82 aliguori
    {
465 06e67a82 aliguori
        int flags;
466 06e67a82 aliguori
        void *addr = NULL;
467 06e67a82 aliguori
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
468 06e67a82 aliguori
#if defined(__x86_64__)
469 06e67a82 aliguori
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
470 06e67a82 aliguori
         * 0x40000000 is free */
471 06e67a82 aliguori
        flags |= MAP_FIXED;
472 06e67a82 aliguori
        addr = (void *)0x40000000;
473 06e67a82 aliguori
        /* Cannot map more than that */
474 06e67a82 aliguori
        if (code_gen_buffer_size > (800 * 1024 * 1024))
475 06e67a82 aliguori
            code_gen_buffer_size = (800 * 1024 * 1024);
476 06e67a82 aliguori
#endif
477 06e67a82 aliguori
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
478 06e67a82 aliguori
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
479 06e67a82 aliguori
                               flags, -1, 0);
480 06e67a82 aliguori
        if (code_gen_buffer == MAP_FAILED) {
481 06e67a82 aliguori
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482 06e67a82 aliguori
            exit(1);
483 06e67a82 aliguori
        }
484 06e67a82 aliguori
    }
485 26a5f13b bellard
#else
486 26a5f13b bellard
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
487 26a5f13b bellard
    map_exec(code_gen_buffer, code_gen_buffer_size);
488 26a5f13b bellard
#endif
489 4369415f bellard
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
490 26a5f13b bellard
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
491 26a5f13b bellard
    code_gen_buffer_max_size = code_gen_buffer_size - 
492 26a5f13b bellard
        code_gen_max_block_size();
493 26a5f13b bellard
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
494 26a5f13b bellard
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
495 26a5f13b bellard
}
496 26a5f13b bellard
497 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
498 26a5f13b bellard
   (in bytes) allocated to the translation buffer. Zero means default
499 26a5f13b bellard
   size. */
500 26a5f13b bellard
void cpu_exec_init_all(unsigned long tb_size)
501 26a5f13b bellard
{
502 26a5f13b bellard
    cpu_gen_init();
503 26a5f13b bellard
    code_gen_alloc(tb_size);
504 26a5f13b bellard
    code_gen_ptr = code_gen_buffer;
505 4369415f bellard
    page_init();
506 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
507 26a5f13b bellard
    io_mem_init();
508 e2eef170 pbrook
#endif
509 26a5f13b bellard
}
510 26a5f13b bellard
511 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
512 9656f324 pbrook
513 9656f324 pbrook
#define CPU_COMMON_SAVE_VERSION 1
514 9656f324 pbrook
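/* Save/restore the target-independent part of the CPU state
   (halted flag and pending interrupt_request) for savevm/migration. */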
515 9656f324 pbrook
static void cpu_common_save(QEMUFile *f, void *opaque)
516 9656f324 pbrook
{
517 9656f324 pbrook
    CPUState *env = opaque;
518 9656f324 pbrook
519 9656f324 pbrook
    qemu_put_be32s(f, &env->halted);
520 9656f324 pbrook
    qemu_put_be32s(f, &env->interrupt_request);
521 9656f324 pbrook
}
522 9656f324 pbrook
523 9656f324 pbrook
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
524 9656f324 pbrook
{
525 9656f324 pbrook
    CPUState *env = opaque;
526 9656f324 pbrook
527 9656f324 pbrook
    if (version_id != CPU_COMMON_SAVE_VERSION)
528 9656f324 pbrook
        return -EINVAL;
529 9656f324 pbrook
530 9656f324 pbrook
    qemu_get_be32s(f, &env->halted);
531 75f482ae pbrook
    qemu_get_be32s(f, &env->interrupt_request);
532 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
533 3098dba0 aurel32
       version_id is increased. */
534 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
535 9656f324 pbrook
    tlb_flush(env, 1);
536 9656f324 pbrook
537 9656f324 pbrook
    return 0;
538 9656f324 pbrook
}
539 9656f324 pbrook
#endif
540 9656f324 pbrook
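/* Register a new CPU: append it to the global CPU list, give it a
   cpu_index and, for system emulation, register its savevm handlers. */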
541 6a00d601 bellard
void cpu_exec_init(CPUState *env)
542 fd6ce8f6 bellard
{
543 6a00d601 bellard
    CPUState **penv;
544 6a00d601 bellard
    int cpu_index;
545 6a00d601 bellard
546 c2764719 pbrook
#if defined(CONFIG_USER_ONLY)
547 c2764719 pbrook
    cpu_list_lock();
548 c2764719 pbrook
#endif
549 6a00d601 bellard
    env->next_cpu = NULL;
550 6a00d601 bellard
    penv = &first_cpu;
551 6a00d601 bellard
    cpu_index = 0;
552 6a00d601 bellard
    while (*penv != NULL) {
553 6a00d601 bellard
        penv = (CPUState **)&(*penv)->next_cpu;
554 6a00d601 bellard
        cpu_index++;
555 6a00d601 bellard
    }
556 6a00d601 bellard
    env->cpu_index = cpu_index;
557 268a362c aliguori
    env->numa_node = 0;
558 c0ce998e aliguori
    TAILQ_INIT(&env->breakpoints);
559 c0ce998e aliguori
    TAILQ_INIT(&env->watchpoints);
560 6a00d601 bellard
    *penv = env;
561 c2764719 pbrook
#if defined(CONFIG_USER_ONLY)
562 c2764719 pbrook
    cpu_list_unlock();
563 c2764719 pbrook
#endif
564 b3c7724c pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
565 9656f324 pbrook
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
566 9656f324 pbrook
                    cpu_common_save, cpu_common_load, env);
567 b3c7724c pbrook
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
568 b3c7724c pbrook
                    cpu_save, cpu_load, env);
569 b3c7724c pbrook
#endif
570 fd6ce8f6 bellard
}
571 fd6ce8f6 bellard
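/* Free the self-modifying-code bitmap of a page and reset its write
   counter. */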
572 9fa3e853 bellard
static inline void invalidate_page_bitmap(PageDesc *p)
573 9fa3e853 bellard
{
574 9fa3e853 bellard
    if (p->code_bitmap) {
575 59817ccb bellard
        qemu_free(p->code_bitmap);
576 9fa3e853 bellard
        p->code_bitmap = NULL;
577 9fa3e853 bellard
    }
578 9fa3e853 bellard
    p->code_write_count = 0;
579 9fa3e853 bellard
}
580 9fa3e853 bellard
581 fd6ce8f6 bellard
/* set to NULL all the 'first_tb' fields in all PageDescs */
582 fd6ce8f6 bellard
static void page_flush_tb(void)
583 fd6ce8f6 bellard
{
584 fd6ce8f6 bellard
    int i, j;
585 fd6ce8f6 bellard
    PageDesc *p;
586 fd6ce8f6 bellard
587 fd6ce8f6 bellard
    for(i = 0; i < L1_SIZE; i++) {
588 fd6ce8f6 bellard
        p = l1_map[i];
589 fd6ce8f6 bellard
        if (p) {
590 9fa3e853 bellard
            for(j = 0; j < L2_SIZE; j++) {
591 9fa3e853 bellard
                p->first_tb = NULL;
592 9fa3e853 bellard
                invalidate_page_bitmap(p);
593 9fa3e853 bellard
                p++;
594 9fa3e853 bellard
            }
595 fd6ce8f6 bellard
        }
596 fd6ce8f6 bellard
    }
597 fd6ce8f6 bellard
}
598 fd6ce8f6 bellard
599 fd6ce8f6 bellard
/* flush all the translation blocks */
600 d4e8164f bellard
/* XXX: tb_flush is currently not thread safe */
601 6a00d601 bellard
void tb_flush(CPUState *env1)
602 fd6ce8f6 bellard
{
603 6a00d601 bellard
    CPUState *env;
604 0124311e bellard
#if defined(DEBUG_FLUSH)
605 ab3d1727 blueswir1
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
606 ab3d1727 blueswir1
           (unsigned long)(code_gen_ptr - code_gen_buffer),
607 ab3d1727 blueswir1
           nb_tbs, nb_tbs > 0 ?
608 ab3d1727 blueswir1
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
609 fd6ce8f6 bellard
#endif
610 26a5f13b bellard
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
611 a208e54a pbrook
        cpu_abort(env1, "Internal error: code buffer overflow\n");
612 a208e54a pbrook
613 fd6ce8f6 bellard
    nb_tbs = 0;
614 3b46e624 ths
615 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
616 6a00d601 bellard
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
617 6a00d601 bellard
    }
618 9fa3e853 bellard
619 8a8a608f bellard
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
620 fd6ce8f6 bellard
    page_flush_tb();
621 9fa3e853 bellard
622 fd6ce8f6 bellard
    code_gen_ptr = code_gen_buffer;
623 d4e8164f bellard
    /* XXX: flush processor icache at this point if cache flush is
624 d4e8164f bellard
       expensive */
625 e3db7226 bellard
    tb_flush_count++;
626 fd6ce8f6 bellard
}
627 fd6ce8f6 bellard
628 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
629 fd6ce8f6 bellard
630 bc98a7ef j_mayer
static void tb_invalidate_check(target_ulong address)
631 fd6ce8f6 bellard
{
632 fd6ce8f6 bellard
    TranslationBlock *tb;
633 fd6ce8f6 bellard
    int i;
634 fd6ce8f6 bellard
    address &= TARGET_PAGE_MASK;
635 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
636 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
637 fd6ce8f6 bellard
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
638 fd6ce8f6 bellard
                  address >= tb->pc + tb->size)) {
639 fd6ce8f6 bellard
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
640 99773bd4 pbrook
                       address, (long)tb->pc, tb->size);
641 fd6ce8f6 bellard
            }
642 fd6ce8f6 bellard
        }
643 fd6ce8f6 bellard
    }
644 fd6ce8f6 bellard
}
645 fd6ce8f6 bellard
646 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
647 fd6ce8f6 bellard
static void tb_page_check(void)
648 fd6ce8f6 bellard
{
649 fd6ce8f6 bellard
    TranslationBlock *tb;
650 fd6ce8f6 bellard
    int i, flags1, flags2;
651 3b46e624 ths
652 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
653 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
654 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
655 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
656 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
657 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
658 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
659 fd6ce8f6 bellard
            }
660 fd6ce8f6 bellard
        }
661 fd6ce8f6 bellard
    }
662 fd6ce8f6 bellard
}
663 fd6ce8f6 bellard
664 bdaf78e0 blueswir1
static void tb_jmp_check(TranslationBlock *tb)
665 d4e8164f bellard
{
666 d4e8164f bellard
    TranslationBlock *tb1;
667 d4e8164f bellard
    unsigned int n1;
668 d4e8164f bellard
669 d4e8164f bellard
    /* suppress any remaining jumps to this TB */
670 d4e8164f bellard
    tb1 = tb->jmp_first;
671 d4e8164f bellard
    for(;;) {
672 d4e8164f bellard
        n1 = (long)tb1 & 3;
673 d4e8164f bellard
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
674 d4e8164f bellard
        if (n1 == 2)
675 d4e8164f bellard
            break;
676 d4e8164f bellard
        tb1 = tb1->jmp_next[n1];
677 d4e8164f bellard
    }
678 d4e8164f bellard
    /* check end of list */
679 d4e8164f bellard
    if (tb1 != tb) {
680 d4e8164f bellard
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
681 d4e8164f bellard
    }
682 d4e8164f bellard
}
683 d4e8164f bellard
684 fd6ce8f6 bellard
#endif
685 fd6ce8f6 bellard
686 fd6ce8f6 bellard
/* invalidate one TB */
687 fd6ce8f6 bellard
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
688 fd6ce8f6 bellard
                             int next_offset)
689 fd6ce8f6 bellard
{
690 fd6ce8f6 bellard
    TranslationBlock *tb1;
691 fd6ce8f6 bellard
    for(;;) {
692 fd6ce8f6 bellard
        tb1 = *ptb;
693 fd6ce8f6 bellard
        if (tb1 == tb) {
694 fd6ce8f6 bellard
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
695 fd6ce8f6 bellard
            break;
696 fd6ce8f6 bellard
        }
697 fd6ce8f6 bellard
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
698 fd6ce8f6 bellard
    }
699 fd6ce8f6 bellard
}
700 fd6ce8f6 bellard
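/* Remove a TB from a page's TB list; the low two bits of each link
   encode which of the TB's (up to) two pages the pointer belongs to. */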
701 9fa3e853 bellard
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
702 9fa3e853 bellard
{
703 9fa3e853 bellard
    TranslationBlock *tb1;
704 9fa3e853 bellard
    unsigned int n1;
705 9fa3e853 bellard
706 9fa3e853 bellard
    for(;;) {
707 9fa3e853 bellard
        tb1 = *ptb;
708 9fa3e853 bellard
        n1 = (long)tb1 & 3;
709 9fa3e853 bellard
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
710 9fa3e853 bellard
        if (tb1 == tb) {
711 9fa3e853 bellard
            *ptb = tb1->page_next[n1];
712 9fa3e853 bellard
            break;
713 9fa3e853 bellard
        }
714 9fa3e853 bellard
        ptb = &tb1->page_next[n1];
715 9fa3e853 bellard
    }
716 9fa3e853 bellard
}
717 9fa3e853 bellard
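/* Remove 'tb' from the circular list of TBs jumping to the destination
   of its jump entry 'n'. */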
718 d4e8164f bellard
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
719 d4e8164f bellard
{
720 d4e8164f bellard
    TranslationBlock *tb1, **ptb;
721 d4e8164f bellard
    unsigned int n1;
722 d4e8164f bellard
723 d4e8164f bellard
    ptb = &tb->jmp_next[n];
724 d4e8164f bellard
    tb1 = *ptb;
725 d4e8164f bellard
    if (tb1) {
726 d4e8164f bellard
        /* find tb(n) in circular list */
727 d4e8164f bellard
        for(;;) {
728 d4e8164f bellard
            tb1 = *ptb;
729 d4e8164f bellard
            n1 = (long)tb1 & 3;
730 d4e8164f bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
731 d4e8164f bellard
            if (n1 == n && tb1 == tb)
732 d4e8164f bellard
                break;
733 d4e8164f bellard
            if (n1 == 2) {
734 d4e8164f bellard
                ptb = &tb1->jmp_first;
735 d4e8164f bellard
            } else {
736 d4e8164f bellard
                ptb = &tb1->jmp_next[n1];
737 d4e8164f bellard
            }
738 d4e8164f bellard
        }
739 d4e8164f bellard
        /* now we can suppress tb(n) from the list */
740 d4e8164f bellard
        *ptb = tb->jmp_next[n];
741 d4e8164f bellard
742 d4e8164f bellard
        tb->jmp_next[n] = NULL;
743 d4e8164f bellard
    }
744 d4e8164f bellard
}
745 d4e8164f bellard
746 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
747 d4e8164f bellard
   another TB */
748 d4e8164f bellard
static inline void tb_reset_jump(TranslationBlock *tb, int n)
749 d4e8164f bellard
{
750 d4e8164f bellard
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
751 d4e8164f bellard
}
752 d4e8164f bellard
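/* Remove a TB from all the lookup structures (physical hash table,
   per-page TB lists, per-CPU tb_jmp_cache) and unchain any TBs that
   jump to it. */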
753 2e70f6ef pbrook
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
754 fd6ce8f6 bellard
{
755 6a00d601 bellard
    CPUState *env;
756 8a40a180 bellard
    PageDesc *p;
757 d4e8164f bellard
    unsigned int h, n1;
758 00f82b8a aurel32
    target_phys_addr_t phys_pc;
759 8a40a180 bellard
    TranslationBlock *tb1, *tb2;
760 3b46e624 ths
761 8a40a180 bellard
    /* remove the TB from the hash list */
762 8a40a180 bellard
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
763 8a40a180 bellard
    h = tb_phys_hash_func(phys_pc);
764 5fafdf24 ths
    tb_remove(&tb_phys_hash[h], tb,
765 8a40a180 bellard
              offsetof(TranslationBlock, phys_hash_next));
766 8a40a180 bellard
767 8a40a180 bellard
    /* remove the TB from the page list */
768 8a40a180 bellard
    if (tb->page_addr[0] != page_addr) {
769 8a40a180 bellard
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
770 8a40a180 bellard
        tb_page_remove(&p->first_tb, tb);
771 8a40a180 bellard
        invalidate_page_bitmap(p);
772 8a40a180 bellard
    }
773 8a40a180 bellard
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
774 8a40a180 bellard
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
775 8a40a180 bellard
        tb_page_remove(&p->first_tb, tb);
776 8a40a180 bellard
        invalidate_page_bitmap(p);
777 8a40a180 bellard
    }
778 8a40a180 bellard
779 36bdbe54 bellard
    tb_invalidated_flag = 1;
780 59817ccb bellard
781 fd6ce8f6 bellard
    /* remove the TB from the per-CPU tb_jmp_cache */
782 8a40a180 bellard
    h = tb_jmp_cache_hash_func(tb->pc);
783 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
784 6a00d601 bellard
        if (env->tb_jmp_cache[h] == tb)
785 6a00d601 bellard
            env->tb_jmp_cache[h] = NULL;
786 6a00d601 bellard
    }
787 d4e8164f bellard
788 d4e8164f bellard
    /* suppress this TB from the two jump lists */
789 d4e8164f bellard
    tb_jmp_remove(tb, 0);
790 d4e8164f bellard
    tb_jmp_remove(tb, 1);
791 d4e8164f bellard
792 d4e8164f bellard
    /* suppress any remaining jumps to this TB */
793 d4e8164f bellard
    tb1 = tb->jmp_first;
794 d4e8164f bellard
    for(;;) {
795 d4e8164f bellard
        n1 = (long)tb1 & 3;
796 d4e8164f bellard
        if (n1 == 2)
797 d4e8164f bellard
            break;
798 d4e8164f bellard
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
799 d4e8164f bellard
        tb2 = tb1->jmp_next[n1];
800 d4e8164f bellard
        tb_reset_jump(tb1, n1);
801 d4e8164f bellard
        tb1->jmp_next[n1] = NULL;
802 d4e8164f bellard
        tb1 = tb2;
803 d4e8164f bellard
    }
804 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
805 9fa3e853 bellard
806 e3db7226 bellard
    tb_phys_invalidate_count++;
807 9fa3e853 bellard
}
808 9fa3e853 bellard
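/* Set 'len' bits starting at bit index 'start' in the bitmap 'tab'. */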
809 9fa3e853 bellard
static inline void set_bits(uint8_t *tab, int start, int len)
810 9fa3e853 bellard
{
811 9fa3e853 bellard
    int end, mask, end1;
812 9fa3e853 bellard
813 9fa3e853 bellard
    end = start + len;
814 9fa3e853 bellard
    tab += start >> 3;
815 9fa3e853 bellard
    mask = 0xff << (start & 7);
816 9fa3e853 bellard
    if ((start & ~7) == (end & ~7)) {
817 9fa3e853 bellard
        if (start < end) {
818 9fa3e853 bellard
            mask &= ~(0xff << (end & 7));
819 9fa3e853 bellard
            *tab |= mask;
820 9fa3e853 bellard
        }
821 9fa3e853 bellard
    } else {
822 9fa3e853 bellard
        *tab++ |= mask;
823 9fa3e853 bellard
        start = (start + 8) & ~7;
824 9fa3e853 bellard
        end1 = end & ~7;
825 9fa3e853 bellard
        while (start < end1) {
826 9fa3e853 bellard
            *tab++ = 0xff;
827 9fa3e853 bellard
            start += 8;
828 9fa3e853 bellard
        }
829 9fa3e853 bellard
        if (start < end) {
830 9fa3e853 bellard
            mask = ~(0xff << (end & 7));
831 9fa3e853 bellard
            *tab |= mask;
832 9fa3e853 bellard
        }
833 9fa3e853 bellard
    }
834 9fa3e853 bellard
}
835 9fa3e853 bellard
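/* Build the SMC bitmap of a page: one bit per byte of the page that is
   covered by a translated block. */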
836 9fa3e853 bellard
static void build_page_bitmap(PageDesc *p)
837 9fa3e853 bellard
{
838 9fa3e853 bellard
    int n, tb_start, tb_end;
839 9fa3e853 bellard
    TranslationBlock *tb;
840 3b46e624 ths
841 b2a7081a pbrook
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
842 9fa3e853 bellard
843 9fa3e853 bellard
    tb = p->first_tb;
844 9fa3e853 bellard
    while (tb != NULL) {
845 9fa3e853 bellard
        n = (long)tb & 3;
846 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
847 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
848 9fa3e853 bellard
        if (n == 0) {
849 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
850 9fa3e853 bellard
               it is not a problem */
851 9fa3e853 bellard
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
852 9fa3e853 bellard
            tb_end = tb_start + tb->size;
853 9fa3e853 bellard
            if (tb_end > TARGET_PAGE_SIZE)
854 9fa3e853 bellard
                tb_end = TARGET_PAGE_SIZE;
855 9fa3e853 bellard
        } else {
856 9fa3e853 bellard
            tb_start = 0;
857 9fa3e853 bellard
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
858 9fa3e853 bellard
        }
859 9fa3e853 bellard
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
860 9fa3e853 bellard
        tb = tb->page_next[n];
861 9fa3e853 bellard
    }
862 9fa3e853 bellard
}
863 9fa3e853 bellard
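/* Translate a new TB for (pc, cs_base, flags); if the TB pool is full,
   flush everything and retry, then link the new TB into the physical
   page tables. */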
864 2e70f6ef pbrook
TranslationBlock *tb_gen_code(CPUState *env,
865 2e70f6ef pbrook
                              target_ulong pc, target_ulong cs_base,
866 2e70f6ef pbrook
                              int flags, int cflags)
867 d720b93d bellard
{
868 d720b93d bellard
    TranslationBlock *tb;
869 d720b93d bellard
    uint8_t *tc_ptr;
870 d720b93d bellard
    target_ulong phys_pc, phys_page2, virt_page2;
871 d720b93d bellard
    int code_gen_size;
872 d720b93d bellard
873 c27004ec bellard
    phys_pc = get_phys_addr_code(env, pc);
874 c27004ec bellard
    tb = tb_alloc(pc);
875 d720b93d bellard
    if (!tb) {
876 d720b93d bellard
        /* flush must be done */
877 d720b93d bellard
        tb_flush(env);
878 d720b93d bellard
        /* cannot fail at this point */
879 c27004ec bellard
        tb = tb_alloc(pc);
880 2e70f6ef pbrook
        /* Don't forget to invalidate previous TB info.  */
881 2e70f6ef pbrook
        tb_invalidated_flag = 1;
882 d720b93d bellard
    }
883 d720b93d bellard
    tc_ptr = code_gen_ptr;
884 d720b93d bellard
    tb->tc_ptr = tc_ptr;
885 d720b93d bellard
    tb->cs_base = cs_base;
886 d720b93d bellard
    tb->flags = flags;
887 d720b93d bellard
    tb->cflags = cflags;
888 d07bde88 blueswir1
    cpu_gen_code(env, tb, &code_gen_size);
889 d720b93d bellard
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
890 3b46e624 ths
891 d720b93d bellard
    /* check next page if needed */
892 c27004ec bellard
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
893 d720b93d bellard
    phys_page2 = -1;
894 c27004ec bellard
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
895 d720b93d bellard
        phys_page2 = get_phys_addr_code(env, virt_page2);
896 d720b93d bellard
    }
897 d720b93d bellard
    tb_link_phys(tb, phys_pc, phys_page2);
898 2e70f6ef pbrook
    return tb;
899 d720b93d bellard
}
900 3b46e624 ths
901 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
902 9fa3e853 bellard
   starting in range [start;end[. NOTE: start and end must refer to
903 d720b93d bellard
   the same physical page. 'is_cpu_write_access' should be true if called
904 d720b93d bellard
   from a real cpu write access: the virtual CPU will exit the current
905 d720b93d bellard
   TB if code is modified inside this TB. */
906 00f82b8a aurel32
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
907 d720b93d bellard
                                   int is_cpu_write_access)
908 d720b93d bellard
{
909 6b917547 aliguori
    TranslationBlock *tb, *tb_next, *saved_tb;
910 d720b93d bellard
    CPUState *env = cpu_single_env;
911 9fa3e853 bellard
    target_ulong tb_start, tb_end;
912 6b917547 aliguori
    PageDesc *p;
913 6b917547 aliguori
    int n;
914 6b917547 aliguori
#ifdef TARGET_HAS_PRECISE_SMC
915 6b917547 aliguori
    int current_tb_not_found = is_cpu_write_access;
916 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
917 6b917547 aliguori
    int current_tb_modified = 0;
918 6b917547 aliguori
    target_ulong current_pc = 0;
919 6b917547 aliguori
    target_ulong current_cs_base = 0;
920 6b917547 aliguori
    int current_flags = 0;
921 6b917547 aliguori
#endif /* TARGET_HAS_PRECISE_SMC */
922 9fa3e853 bellard
923 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
924 5fafdf24 ths
    if (!p)
925 9fa3e853 bellard
        return;
926 5fafdf24 ths
    if (!p->code_bitmap &&
927 d720b93d bellard
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
928 d720b93d bellard
        is_cpu_write_access) {
929 9fa3e853 bellard
        /* build code bitmap */
930 9fa3e853 bellard
        build_page_bitmap(p);
931 9fa3e853 bellard
    }
932 9fa3e853 bellard
933 9fa3e853 bellard
    /* we remove all the TBs in the range [start, end[ */
934 9fa3e853 bellard
    /* XXX: see if in some cases it could be faster to invalidate all the code */
935 9fa3e853 bellard
    tb = p->first_tb;
936 9fa3e853 bellard
    while (tb != NULL) {
937 9fa3e853 bellard
        n = (long)tb & 3;
938 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
939 9fa3e853 bellard
        tb_next = tb->page_next[n];
940 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
941 9fa3e853 bellard
        if (n == 0) {
942 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
943 9fa3e853 bellard
               it is not a problem */
944 9fa3e853 bellard
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
945 9fa3e853 bellard
            tb_end = tb_start + tb->size;
946 9fa3e853 bellard
        } else {
947 9fa3e853 bellard
            tb_start = tb->page_addr[1];
948 9fa3e853 bellard
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
949 9fa3e853 bellard
        }
950 9fa3e853 bellard
        if (!(tb_end <= start || tb_start >= end)) {
951 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
952 d720b93d bellard
            if (current_tb_not_found) {
953 d720b93d bellard
                current_tb_not_found = 0;
954 d720b93d bellard
                current_tb = NULL;
955 2e70f6ef pbrook
                if (env->mem_io_pc) {
956 d720b93d bellard
                    /* now we have a real cpu fault */
957 2e70f6ef pbrook
                    current_tb = tb_find_pc(env->mem_io_pc);
958 d720b93d bellard
                }
959 d720b93d bellard
            }
960 d720b93d bellard
            if (current_tb == tb &&
961 2e70f6ef pbrook
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
962 d720b93d bellard
                /* If we are modifying the current TB, we must stop
963 d720b93d bellard
                its execution. We could be more precise by checking
964 d720b93d bellard
                that the modification is after the current PC, but it
965 d720b93d bellard
                would require a specialized function to partially
966 d720b93d bellard
                restore the CPU state */
967 3b46e624 ths
968 d720b93d bellard
                current_tb_modified = 1;
969 5fafdf24 ths
                cpu_restore_state(current_tb, env,
970 2e70f6ef pbrook
                                  env->mem_io_pc, NULL);
971 6b917547 aliguori
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
972 6b917547 aliguori
                                     &current_flags);
973 d720b93d bellard
            }
974 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
975 6f5a9f7e bellard
            /* we need to do that to handle the case where a signal
976 6f5a9f7e bellard
               occurs while doing tb_phys_invalidate() */
977 6f5a9f7e bellard
            saved_tb = NULL;
978 6f5a9f7e bellard
            if (env) {
979 6f5a9f7e bellard
                saved_tb = env->current_tb;
980 6f5a9f7e bellard
                env->current_tb = NULL;
981 6f5a9f7e bellard
            }
982 9fa3e853 bellard
            tb_phys_invalidate(tb, -1);
983 6f5a9f7e bellard
            if (env) {
984 6f5a9f7e bellard
                env->current_tb = saved_tb;
985 6f5a9f7e bellard
                if (env->interrupt_request && env->current_tb)
986 6f5a9f7e bellard
                    cpu_interrupt(env, env->interrupt_request);
987 6f5a9f7e bellard
            }
988 9fa3e853 bellard
        }
989 9fa3e853 bellard
        tb = tb_next;
990 9fa3e853 bellard
    }
991 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
992 9fa3e853 bellard
    /* if no code remaining, no need to continue to use slow writes */
993 9fa3e853 bellard
    if (!p->first_tb) {
994 9fa3e853 bellard
        invalidate_page_bitmap(p);
995 d720b93d bellard
        if (is_cpu_write_access) {
996 2e70f6ef pbrook
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
997 d720b93d bellard
        }
998 d720b93d bellard
    }
999 d720b93d bellard
#endif
1000 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1001 d720b93d bellard
    if (current_tb_modified) {
1002 d720b93d bellard
        /* we generate a block containing just the instruction
1003 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1004 d720b93d bellard
           itself */
1005 ea1c1802 bellard
        env->current_tb = NULL;
1006 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1007 d720b93d bellard
        cpu_resume_from_signal(env, NULL);
1008 9fa3e853 bellard
    }
1009 fd6ce8f6 bellard
#endif
1010 9fa3e853 bellard
}
1011 fd6ce8f6 bellard
1012 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1013 00f82b8a aurel32
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1014 9fa3e853 bellard
{
1015 9fa3e853 bellard
    PageDesc *p;
1016 9fa3e853 bellard
    int offset, b;
1017 59817ccb bellard
#if 0
1018 a4193c8a bellard
    if (1) {
1019 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1020 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1021 93fcfe39 aliguori
                  cpu_single_env->eip,
1022 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1023 59817ccb bellard
    }
1024 59817ccb bellard
#endif
1025 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1026 5fafdf24 ths
    if (!p)
1027 9fa3e853 bellard
        return;
1028 9fa3e853 bellard
    if (p->code_bitmap) {
1029 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1030 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1031 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1032 9fa3e853 bellard
            goto do_invalidate;
1033 9fa3e853 bellard
    } else {
1034 9fa3e853 bellard
    do_invalidate:
1035 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1036 9fa3e853 bellard
    }
1037 9fa3e853 bellard
}
1038 9fa3e853 bellard
1039 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1040 00f82b8a aurel32
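/* Invalidate every TB on the page containing 'addr' (user-mode only);
   if the currently executing TB is affected, regenerate it and resume
   from the signal handler. */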
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1041 d720b93d bellard
                                    unsigned long pc, void *puc)
1042 9fa3e853 bellard
{
1043 6b917547 aliguori
    TranslationBlock *tb;
1044 9fa3e853 bellard
    PageDesc *p;
1045 6b917547 aliguori
    int n;
1046 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1047 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1048 d720b93d bellard
    CPUState *env = cpu_single_env;
1049 6b917547 aliguori
    int current_tb_modified = 0;
1050 6b917547 aliguori
    target_ulong current_pc = 0;
1051 6b917547 aliguori
    target_ulong current_cs_base = 0;
1052 6b917547 aliguori
    int current_flags = 0;
1053 d720b93d bellard
#endif
1054 9fa3e853 bellard
1055 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1056 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1057 5fafdf24 ths
    if (!p)
1058 9fa3e853 bellard
        return;
1059 9fa3e853 bellard
    tb = p->first_tb;
1060 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1061 d720b93d bellard
    if (tb && pc != 0) {
1062 d720b93d bellard
        current_tb = tb_find_pc(pc);
1063 d720b93d bellard
    }
1064 d720b93d bellard
#endif
1065 9fa3e853 bellard
    while (tb != NULL) {
1066 9fa3e853 bellard
        n = (long)tb & 3;
1067 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1068 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1069 d720b93d bellard
        if (current_tb == tb &&
1070 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1071 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1072 d720b93d bellard
                   its execution. We could be more precise by checking
1073 d720b93d bellard
                   that the modification is after the current PC, but it
1074 d720b93d bellard
                   would require a specialized function to partially
1075 d720b93d bellard
                   restore the CPU state */
1076 3b46e624 ths
1077 d720b93d bellard
            current_tb_modified = 1;
1078 d720b93d bellard
            cpu_restore_state(current_tb, env, pc, puc);
1079 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1080 6b917547 aliguori
                                 &current_flags);
1081 d720b93d bellard
        }
1082 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1083 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1084 9fa3e853 bellard
        tb = tb->page_next[n];
1085 9fa3e853 bellard
    }
1086 fd6ce8f6 bellard
    p->first_tb = NULL;
1087 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1088 d720b93d bellard
    if (current_tb_modified) {
1089 d720b93d bellard
        /* we generate a block containing just the instruction
1090 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1091 d720b93d bellard
           itself */
1092 ea1c1802 bellard
        env->current_tb = NULL;
1093 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1094 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1095 d720b93d bellard
    }
1096 d720b93d bellard
#endif
1097 fd6ce8f6 bellard
}
1098 9fa3e853 bellard
#endif
1099 fd6ce8f6 bellard
1100 fd6ce8f6 bellard
/* add the tb to the target page and protect it if necessary */
1101 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1102 53a5960a pbrook
                                 unsigned int n, target_ulong page_addr)
1103 fd6ce8f6 bellard
{
1104 fd6ce8f6 bellard
    PageDesc *p;
1105 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1106 9fa3e853 bellard
1107 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1108 3a7d929e bellard
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1109 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1110 9fa3e853 bellard
    last_first_tb = p->first_tb;
1111 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1112 9fa3e853 bellard
    invalidate_page_bitmap(p);
1113 fd6ce8f6 bellard
1114 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1115 d720b93d bellard
1116 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1117 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1118 53a5960a pbrook
        target_ulong addr;
1119 53a5960a pbrook
        PageDesc *p2;
1120 9fa3e853 bellard
        int prot;
1121 9fa3e853 bellard
1122 fd6ce8f6 bellard
        /* force the host page to be non-writable (writes will have a
1123 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1124 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1125 fd6ce8f6 bellard
        prot = 0;
1126 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1127 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1128 53a5960a pbrook
1129 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1130 53a5960a pbrook
            if (!p2)
1131 53a5960a pbrook
                continue;
1132 53a5960a pbrook
            prot |= p2->flags;
1133 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1134 53a5960a pbrook
            page_get_flags(addr);
1135 53a5960a pbrook
          }
1136 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1137 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1138 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1139 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1140 53a5960a pbrook
               page_addr);
1141 fd6ce8f6 bellard
#endif
1142 fd6ce8f6 bellard
    }
1143 9fa3e853 bellard
#else
1144 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1145 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1146 9fa3e853 bellard
       allocated in a physical page */
1147 9fa3e853 bellard
    if (!last_first_tb) {
1148 6a00d601 bellard
        tlb_protect_code(page_addr);
1149 9fa3e853 bellard
    }
1150 9fa3e853 bellard
#endif
1151 d720b93d bellard
1152 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1153 fd6ce8f6 bellard
}
1154 fd6ce8f6 bellard
1155 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1156 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1157 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1158 fd6ce8f6 bellard
{
1159 fd6ce8f6 bellard
    TranslationBlock *tb;
1160 fd6ce8f6 bellard
1161 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1162 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1163 d4e8164f bellard
        return NULL;
1164 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1165 fd6ce8f6 bellard
    tb->pc = pc;
1166 b448f2f3 bellard
    tb->cflags = 0;
1167 d4e8164f bellard
    return tb;
1168 d4e8164f bellard
}
1169 d4e8164f bellard
1170 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
1171 2e70f6ef pbrook
{
1172 bf20dc07 ths
    /* In practice this is mostly used for single-use temporary TBs.
1173 2e70f6ef pbrook
       Ignore the hard cases and just back up if this TB happens to
1174 2e70f6ef pbrook
       be the last one generated.  */
1175 2e70f6ef pbrook
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1176 2e70f6ef pbrook
        code_gen_ptr = tb->tc_ptr;
1177 2e70f6ef pbrook
        nb_tbs--;
1178 2e70f6ef pbrook
    }
1179 2e70f6ef pbrook
}
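
/* Hedged usage sketch (not part of exec.c): the allocate/retry pattern a
   code generator such as tb_gen_code() follows.  If tb_alloc() fails, the
   whole translation cache is flushed and the allocation retried; a retry
   right after tb_flush() cannot fail. */
static TranslationBlock *example_alloc_tb(CPUState *env, target_ulong pc)
{
    TranslationBlock *tb;

    tb = tb_alloc(pc);
    if (!tb) {
        tb_flush(env);          /* drop every TB to make room */
        tb = tb_alloc(pc);
    }
    return tb;
}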
1180 2e70f6ef pbrook
1181 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1182 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1183 5fafdf24 ths
void tb_link_phys(TranslationBlock *tb,
1184 9fa3e853 bellard
                  target_ulong phys_pc, target_ulong phys_page2)
1185 d4e8164f bellard
{
1186 9fa3e853 bellard
    unsigned int h;
1187 9fa3e853 bellard
    TranslationBlock **ptb;
1188 9fa3e853 bellard
1189 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1190 c8a706fe pbrook
       before we are done.  */
1191 c8a706fe pbrook
    mmap_lock();
1192 9fa3e853 bellard
    /* add in the physical hash table */
1193 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1194 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1195 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1196 9fa3e853 bellard
    *ptb = tb;
1197 fd6ce8f6 bellard
1198 fd6ce8f6 bellard
    /* add in the page list */
1199 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1200 9fa3e853 bellard
    if (phys_page2 != -1)
1201 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1202 9fa3e853 bellard
    else
1203 9fa3e853 bellard
        tb->page_addr[1] = -1;
1204 9fa3e853 bellard
1205 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1206 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1207 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1208 d4e8164f bellard
1209 d4e8164f bellard
    /* init original jump addresses */
1210 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1211 d4e8164f bellard
        tb_reset_jump(tb, 0);
1212 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1213 d4e8164f bellard
        tb_reset_jump(tb, 1);
1214 8a40a180 bellard
1215 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1216 8a40a180 bellard
    tb_page_check();
1217 8a40a180 bellard
#endif
1218 c8a706fe pbrook
    mmap_unlock();
1219 fd6ce8f6 bellard
}
1220 fd6ce8f6 bellard
1221 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1222 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1223 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1224 fd6ce8f6 bellard
{
1225 9fa3e853 bellard
    int m_min, m_max, m;
1226 9fa3e853 bellard
    unsigned long v;
1227 9fa3e853 bellard
    TranslationBlock *tb;
1228 a513fe19 bellard
1229 a513fe19 bellard
    if (nb_tbs <= 0)
1230 a513fe19 bellard
        return NULL;
1231 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1232 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1233 a513fe19 bellard
        return NULL;
1234 a513fe19 bellard
    /* binary search (cf Knuth) */
1235 a513fe19 bellard
    m_min = 0;
1236 a513fe19 bellard
    m_max = nb_tbs - 1;
1237 a513fe19 bellard
    while (m_min <= m_max) {
1238 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1239 a513fe19 bellard
        tb = &tbs[m];
1240 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1241 a513fe19 bellard
        if (v == tc_ptr)
1242 a513fe19 bellard
            return tb;
1243 a513fe19 bellard
        else if (tc_ptr < v) {
1244 a513fe19 bellard
            m_max = m - 1;
1245 a513fe19 bellard
        } else {
1246 a513fe19 bellard
            m_min = m + 1;
1247 a513fe19 bellard
        }
1248 5fafdf24 ths
    }
1249 a513fe19 bellard
    return &tbs[m_max];
1250 a513fe19 bellard
}
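
/* Hedged sketch (illustrative only): mapping a host code pointer taken from
   a signal context back to the TB that contains it, which is the first step
   of precise state restoration.  The reporting helper below is fictional. */
static void example_report_host_pc(unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        printf("host pc %#lx lies in TB for guest pc " TARGET_FMT_lx "\n",
               host_pc, tb->pc);
    }
}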
1251 7501267e bellard
1252 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1253 ea041c0e bellard
1254 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1255 ea041c0e bellard
{
1256 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1257 ea041c0e bellard
    unsigned int n1;
1258 ea041c0e bellard
1259 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1260 ea041c0e bellard
    if (tb1 != NULL) {
1261 ea041c0e bellard
        /* find head of list */
1262 ea041c0e bellard
        for(;;) {
1263 ea041c0e bellard
            n1 = (long)tb1 & 3;
1264 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1265 ea041c0e bellard
            if (n1 == 2)
1266 ea041c0e bellard
                break;
1267 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1268 ea041c0e bellard
        }
1269 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1270 ea041c0e bellard
        tb_next = tb1;
1271 ea041c0e bellard
1272 ea041c0e bellard
        /* remove tb from the jmp_first list */
1273 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1274 ea041c0e bellard
        for(;;) {
1275 ea041c0e bellard
            tb1 = *ptb;
1276 ea041c0e bellard
            n1 = (long)tb1 & 3;
1277 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1278 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1279 ea041c0e bellard
                break;
1280 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1281 ea041c0e bellard
        }
1282 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1283 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1284 3b46e624 ths
1285 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1286 ea041c0e bellard
        tb_reset_jump(tb, n);
1287 ea041c0e bellard
1288 0124311e bellard
        /* suppress jumps in the tb on which we could have jumped */
1289 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1290 ea041c0e bellard
    }
1291 ea041c0e bellard
}
1292 ea041c0e bellard
1293 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1294 ea041c0e bellard
{
1295 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1296 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1297 ea041c0e bellard
}
1298 ea041c0e bellard
1299 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1300 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1301 d720b93d bellard
{
1302 9b3c35e0 j_mayer
    target_phys_addr_t addr;
1303 9b3c35e0 j_mayer
    target_ulong pd;
1304 c2f07f81 pbrook
    ram_addr_t ram_addr;
1305 c2f07f81 pbrook
    PhysPageDesc *p;
1306 d720b93d bellard
1307 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1308 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1309 c2f07f81 pbrook
    if (!p) {
1310 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1311 c2f07f81 pbrook
    } else {
1312 c2f07f81 pbrook
        pd = p->phys_offset;
1313 c2f07f81 pbrook
    }
1314 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1315 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1316 d720b93d bellard
}
1317 c27004ec bellard
#endif
1318 d720b93d bellard
1319 6658ffb8 pbrook
/* Add a watchpoint.  */
1320 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1321 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1322 6658ffb8 pbrook
{
1323 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1324 c0ce998e aliguori
    CPUWatchpoint *wp;
1325 6658ffb8 pbrook
1326 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1327 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1328 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1329 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1330 b4051334 aliguori
        return -EINVAL;
1331 b4051334 aliguori
    }
1332 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1333 a1d1bb31 aliguori
1334 a1d1bb31 aliguori
    wp->vaddr = addr;
1335 b4051334 aliguori
    wp->len_mask = len_mask;
1336 a1d1bb31 aliguori
    wp->flags = flags;
1337 a1d1bb31 aliguori
1338 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1339 c0ce998e aliguori
    if (flags & BP_GDB)
1340 c0ce998e aliguori
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1341 c0ce998e aliguori
    else
1342 c0ce998e aliguori
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1343 6658ffb8 pbrook
1344 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1345 a1d1bb31 aliguori
1346 a1d1bb31 aliguori
    if (watchpoint)
1347 a1d1bb31 aliguori
        *watchpoint = wp;
1348 a1d1bb31 aliguori
    return 0;
1349 6658ffb8 pbrook
}
1350 6658ffb8 pbrook
1351 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1352 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1353 a1d1bb31 aliguori
                          int flags)
1354 6658ffb8 pbrook
{
1355 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1356 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1357 6658ffb8 pbrook
1358 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1359 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1360 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1361 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1362 6658ffb8 pbrook
            return 0;
1363 6658ffb8 pbrook
        }
1364 6658ffb8 pbrook
    }
1365 a1d1bb31 aliguori
    return -ENOENT;
1366 6658ffb8 pbrook
}
1367 6658ffb8 pbrook
1368 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1369 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1370 a1d1bb31 aliguori
{
1371 c0ce998e aliguori
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1372 7d03f82f edgar_igl
1373 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1374 a1d1bb31 aliguori
1375 a1d1bb31 aliguori
    qemu_free(watchpoint);
1376 a1d1bb31 aliguori
}
1377 a1d1bb31 aliguori
1378 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1379 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1380 a1d1bb31 aliguori
{
1381 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1382 a1d1bb31 aliguori
1383 c0ce998e aliguori
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1384 a1d1bb31 aliguori
        if (wp->flags & mask)
1385 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1386 c0ce998e aliguori
    }
1387 7d03f82f edgar_igl
}
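
/* Hedged usage sketch: insert an 8-byte write watchpoint and later drop it
   by reference, roughly what a gdb-stub caller does.  BP_MEM_WRITE is
   assumed to come from cpu-all.h; only BP_GDB and BP_WATCHPOINT_HIT appear
   in this file. */
static int example_watch_write(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 8, BP_GDB | BP_MEM_WRITE, &wp) < 0)
        return -1;              /* rejected: bad length or alignment */
    /* ... resume; a hit sets BP_WATCHPOINT_HIT in wp->flags ... */
    cpu_watchpoint_remove_by_ref(env, wp);
    return 0;
}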
1388 7d03f82f edgar_igl
1389 a1d1bb31 aliguori
/* Add a breakpoint.  */
1390 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1391 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1392 4c3a88a2 bellard
{
1393 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1394 c0ce998e aliguori
    CPUBreakpoint *bp;
1395 3b46e624 ths
1396 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1397 4c3a88a2 bellard
1398 a1d1bb31 aliguori
    bp->pc = pc;
1399 a1d1bb31 aliguori
    bp->flags = flags;
1400 a1d1bb31 aliguori
1401 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1402 c0ce998e aliguori
    if (flags & BP_GDB)
1403 c0ce998e aliguori
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1404 c0ce998e aliguori
    else
1405 c0ce998e aliguori
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1406 3b46e624 ths
1407 d720b93d bellard
    breakpoint_invalidate(env, pc);
1408 a1d1bb31 aliguori
1409 a1d1bb31 aliguori
    if (breakpoint)
1410 a1d1bb31 aliguori
        *breakpoint = bp;
1411 4c3a88a2 bellard
    return 0;
1412 4c3a88a2 bellard
#else
1413 a1d1bb31 aliguori
    return -ENOSYS;
1414 4c3a88a2 bellard
#endif
1415 4c3a88a2 bellard
}
1416 4c3a88a2 bellard
1417 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1418 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1419 a1d1bb31 aliguori
{
1420 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1421 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1422 a1d1bb31 aliguori
1423 c0ce998e aliguori
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1424 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1425 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1426 a1d1bb31 aliguori
            return 0;
1427 a1d1bb31 aliguori
        }
1428 7d03f82f edgar_igl
    }
1429 a1d1bb31 aliguori
    return -ENOENT;
1430 a1d1bb31 aliguori
#else
1431 a1d1bb31 aliguori
    return -ENOSYS;
1432 7d03f82f edgar_igl
#endif
1433 7d03f82f edgar_igl
}
1434 7d03f82f edgar_igl
1435 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1436 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1437 4c3a88a2 bellard
{
1438 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1439 c0ce998e aliguori
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1440 d720b93d bellard
1441 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1442 a1d1bb31 aliguori
1443 a1d1bb31 aliguori
    qemu_free(breakpoint);
1444 a1d1bb31 aliguori
#endif
1445 a1d1bb31 aliguori
}
1446 a1d1bb31 aliguori
1447 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1448 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1449 a1d1bb31 aliguori
{
1450 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1451 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1452 a1d1bb31 aliguori
1453 c0ce998e aliguori
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1454 a1d1bb31 aliguori
        if (bp->flags & mask)
1455 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1456 c0ce998e aliguori
    }
1457 4c3a88a2 bellard
#endif
1458 4c3a88a2 bellard
}
1459 4c3a88a2 bellard
1460 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1461 c33a346e bellard
   CPU loop after each instruction */
1462 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1463 c33a346e bellard
{
1464 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1465 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1466 c33a346e bellard
        env->singlestep_enabled = enabled;
1467 e22a25c9 aliguori
        if (kvm_enabled())
1468 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1469 e22a25c9 aliguori
        else {
1470 e22a25c9 aliguori
            /* must flush all the translated code to avoid inconsistencies */
1471 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1472 e22a25c9 aliguori
            tb_flush(env);
1473 e22a25c9 aliguori
        }
1474 c33a346e bellard
    }
1475 c33a346e bellard
#endif
1476 c33a346e bellard
}
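
/* Hedged sketch: planting a breakpoint and single-stepping, in the style of
   a debugger front end.  Running the CPU loop until it returns EXCP_DEBUG
   is elided. */
static int example_break_and_step(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) < 0)
        return -1;
    /* ... the CPU loop returns EXCP_DEBUG once 'pc' is reached ... */
    cpu_single_step(env, 1);    /* the next resume executes one insn */
    /* ... step ... */
    cpu_single_step(env, 0);
    cpu_breakpoint_remove_by_ref(env, bp);
    return 0;
}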
1477 c33a346e bellard
1478 34865134 bellard
/* enable or disable low levels log */
1479 34865134 bellard
void cpu_set_log(int log_flags)
1480 34865134 bellard
{
1481 34865134 bellard
    loglevel = log_flags;
1482 34865134 bellard
    if (loglevel && !logfile) {
1483 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1484 34865134 bellard
        if (!logfile) {
1485 34865134 bellard
            perror(logfilename);
1486 34865134 bellard
            _exit(1);
1487 34865134 bellard
        }
1488 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1489 9fa3e853 bellard
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1490 9fa3e853 bellard
        {
1491 b55266b5 blueswir1
            static char logfile_buf[4096];
1492 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1493 9fa3e853 bellard
        }
1494 9fa3e853 bellard
#else
1495 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1496 9fa3e853 bellard
#endif
1497 e735b91c pbrook
        log_append = 1;
1498 e735b91c pbrook
    }
1499 e735b91c pbrook
    if (!loglevel && logfile) {
1500 e735b91c pbrook
        fclose(logfile);
1501 e735b91c pbrook
        logfile = NULL;
1502 34865134 bellard
    }
1503 34865134 bellard
}
1504 34865134 bellard
1505 34865134 bellard
void cpu_set_log_filename(const char *filename)
1506 34865134 bellard
{
1507 34865134 bellard
    logfilename = strdup(filename);
1508 e735b91c pbrook
    if (logfile) {
1509 e735b91c pbrook
        fclose(logfile);
1510 e735b91c pbrook
        logfile = NULL;
1511 e735b91c pbrook
    }
1512 e735b91c pbrook
    cpu_set_log(loglevel);
1513 34865134 bellard
}
1514 c33a346e bellard
1515 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1516 ea041c0e bellard
{
1517 3098dba0 aurel32
#if defined(USE_NPTL)
1518 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1519 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1520 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds.  Often
1521 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
1522 3098dba0 aurel32
#else
1523 ea041c0e bellard
    TranslationBlock *tb;
1524 15a51156 aurel32
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1525 59817ccb bellard
1526 3098dba0 aurel32
    tb = env->current_tb;
1527 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1528 3098dba0 aurel32
       all the potentially executing TB */
1529 3098dba0 aurel32
    if (tb && !testandset(&interrupt_lock)) {
1530 3098dba0 aurel32
        env->current_tb = NULL;
1531 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1532 3098dba0 aurel32
        resetlock(&interrupt_lock);
1533 be214e6c aurel32
    }
1534 3098dba0 aurel32
#endif
1535 3098dba0 aurel32
}
1536 3098dba0 aurel32
1537 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1538 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1539 3098dba0 aurel32
{
1540 3098dba0 aurel32
    int old_mask;
1541 be214e6c aurel32
1542 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1543 68a79315 bellard
    env->interrupt_request |= mask;
1544 3098dba0 aurel32
1545 8edac960 aliguori
#ifndef CONFIG_USER_ONLY
1546 8edac960 aliguori
    /*
1547 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1548 8edac960 aliguori
     * case it's halted.
1549 8edac960 aliguori
     */
1550 8edac960 aliguori
    if (!qemu_cpu_self(env)) {
1551 8edac960 aliguori
        qemu_cpu_kick(env);
1552 8edac960 aliguori
        return;
1553 8edac960 aliguori
    }
1554 8edac960 aliguori
#endif
1555 8edac960 aliguori
1556 2e70f6ef pbrook
    if (use_icount) {
1557 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1558 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1559 2e70f6ef pbrook
        if (!can_do_io(env)
1560 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1561 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1562 2e70f6ef pbrook
        }
1563 2e70f6ef pbrook
#endif
1564 2e70f6ef pbrook
    } else {
1565 3098dba0 aurel32
        cpu_unlink_tb(env);
1566 ea041c0e bellard
    }
1567 ea041c0e bellard
}
1568 ea041c0e bellard
1569 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1570 b54ad049 bellard
{
1571 b54ad049 bellard
    env->interrupt_request &= ~mask;
1572 b54ad049 bellard
}
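
/* Hedged sketch: how a device or board model drives an interrupt line into
   the CPU.  CPU_INTERRUPT_HARD is the generic external-interrupt bit,
   assumed from cpu-all.h. */
static void example_set_irq_level(CPUState *env, int level)
{
    if (level)
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    else
        cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
}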
1573 b54ad049 bellard
1574 3098dba0 aurel32
void cpu_exit(CPUState *env)
1575 3098dba0 aurel32
{
1576 3098dba0 aurel32
    env->exit_request = 1;
1577 3098dba0 aurel32
    cpu_unlink_tb(env);
1578 3098dba0 aurel32
}
1579 3098dba0 aurel32
1580 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1581 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1582 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1583 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1584 f193c797 bellard
      "show target assembly code for each compiled TB" },
1585 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1586 57fec1fe bellard
      "show micro ops for each compiled TB" },
1587 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1588 e01a1157 blueswir1
      "show micro ops "
1589 e01a1157 blueswir1
#ifdef TARGET_I386
1590 e01a1157 blueswir1
      "before eflags optimization and "
1591 f193c797 bellard
#endif
1592 e01a1157 blueswir1
      "after liveness analysis" },
1593 f193c797 bellard
    { CPU_LOG_INT, "int",
1594 f193c797 bellard
      "show interrupts/exceptions in short format" },
1595 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1596 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1597 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1598 e91c8a77 ths
      "show CPU state before block translation" },
1599 f193c797 bellard
#ifdef TARGET_I386
1600 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1601 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1602 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1603 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1604 f193c797 bellard
#endif
1605 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1606 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1607 fd872598 bellard
      "show all i/o ports accesses" },
1608 8e3a9fd2 bellard
#endif
1609 f193c797 bellard
    { 0, NULL, NULL },
1610 f193c797 bellard
};
1611 f193c797 bellard
1612 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1613 f193c797 bellard
{
1614 f193c797 bellard
    if (strlen(s2) != n)
1615 f193c797 bellard
        return 0;
1616 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1617 f193c797 bellard
}
1618 3b46e624 ths
1619 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
1620 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1621 f193c797 bellard
{
1622 c7cd6a37 blueswir1
    const CPULogItem *item;
1623 f193c797 bellard
    int mask;
1624 f193c797 bellard
    const char *p, *p1;
1625 f193c797 bellard
1626 f193c797 bellard
    p = str;
1627 f193c797 bellard
    mask = 0;
1628 f193c797 bellard
    for(;;) {
1629 f193c797 bellard
        p1 = strchr(p, ',');
1630 f193c797 bellard
        if (!p1)
1631 f193c797 bellard
            p1 = p + strlen(p);
1632 8e3a9fd2 bellard
        if(cmp1(p,p1-p,"all")) {
1633 8e3a9fd2 bellard
                for(item = cpu_log_items; item->mask != 0; item++) {
1634 8e3a9fd2 bellard
                        mask |= item->mask;
1635 8e3a9fd2 bellard
                }
1636 8e3a9fd2 bellard
        } else {
1637 f193c797 bellard
        for(item = cpu_log_items; item->mask != 0; item++) {
1638 f193c797 bellard
            if (cmp1(p, p1 - p, item->name))
1639 f193c797 bellard
                goto found;
1640 f193c797 bellard
        }
1641 f193c797 bellard
        return 0;
1642 8e3a9fd2 bellard
        }
1643 f193c797 bellard
    found:
1644 f193c797 bellard
        mask |= item->mask;
1645 f193c797 bellard
        if (*p1 != ',')
1646 f193c797 bellard
            break;
1647 f193c797 bellard
        p = p1 + 1;
1648 f193c797 bellard
    }
1649 f193c797 bellard
    return mask;
1650 f193c797 bellard
}
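
/* Hedged sketch: how a "-d in_asm,cpu"-style option would be wired up using
   the helpers above.  The log file path is illustrative. */
static void example_enable_logging(const char *items)
{
    int mask = cpu_str_to_log_mask(items);

    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", items);
        return;
    }
    cpu_set_log_filename("/tmp/qemu.log");
    cpu_set_log(mask);
}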
1651 ea041c0e bellard
1652 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1653 7501267e bellard
{
1654 7501267e bellard
    va_list ap;
1655 493ae1f0 pbrook
    va_list ap2;
1656 7501267e bellard
1657 7501267e bellard
    va_start(ap, fmt);
1658 493ae1f0 pbrook
    va_copy(ap2, ap);
1659 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1660 7501267e bellard
    vfprintf(stderr, fmt, ap);
1661 7501267e bellard
    fprintf(stderr, "\n");
1662 7501267e bellard
#ifdef TARGET_I386
1663 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1664 7fe48483 bellard
#else
1665 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1666 7501267e bellard
#endif
1667 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1668 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1669 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1670 93fcfe39 aliguori
        qemu_log("\n");
1671 f9373291 j_mayer
#ifdef TARGET_I386
1672 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1673 f9373291 j_mayer
#else
1674 93fcfe39 aliguori
        log_cpu_state(env, 0);
1675 f9373291 j_mayer
#endif
1676 31b1a7b4 aliguori
        qemu_log_flush();
1677 93fcfe39 aliguori
        qemu_log_close();
1678 924edcae balrog
    }
1679 493ae1f0 pbrook
    va_end(ap2);
1680 f9373291 j_mayer
    va_end(ap);
1681 7501267e bellard
    abort();
1682 7501267e bellard
}
1683 7501267e bellard
1684 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1685 c5be9f08 ths
{
1686 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1687 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1688 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1689 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1690 5a38f081 aliguori
    CPUBreakpoint *bp;
1691 5a38f081 aliguori
    CPUWatchpoint *wp;
1692 5a38f081 aliguori
#endif
1693 5a38f081 aliguori
1694 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1695 5a38f081 aliguori
1696 5a38f081 aliguori
    /* Preserve chaining and index. */
1697 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1698 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1699 5a38f081 aliguori
1700 5a38f081 aliguori
    /* Clone all break/watchpoints.
1701 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1702 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1703 5a38f081 aliguori
    TAILQ_INIT(&env->breakpoints);
1704 5a38f081 aliguori
    TAILQ_INIT(&env->watchpoints);
1705 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1706 5a38f081 aliguori
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1707 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1708 5a38f081 aliguori
    }
1709 5a38f081 aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1710 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1711 5a38f081 aliguori
                              wp->flags, NULL);
1712 5a38f081 aliguori
    }
1713 5a38f081 aliguori
#endif
1714 5a38f081 aliguori
1715 c5be9f08 ths
    return new_env;
1716 c5be9f08 ths
}
1717 c5be9f08 ths
1718 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1719 0124311e bellard
1720 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1721 5c751e99 edgar_igl
{
1722 5c751e99 edgar_igl
    unsigned int i;
1723 5c751e99 edgar_igl
1724 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1725 5c751e99 edgar_igl
       overlap the flushed page.  */
1726 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1727 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1728 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1729 5c751e99 edgar_igl
1730 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1731 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1732 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1733 5c751e99 edgar_igl
}
1734 5c751e99 edgar_igl
1735 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1736 ee8b7021 bellard
   implemented yet) */
1737 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1738 33417e70 bellard
{
1739 33417e70 bellard
    int i;
1740 0124311e bellard
1741 9fa3e853 bellard
#if defined(DEBUG_TLB)
1742 9fa3e853 bellard
    printf("tlb_flush:\n");
1743 9fa3e853 bellard
#endif
1744 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1745 0124311e bellard
       links while we are modifying them */
1746 0124311e bellard
    env->current_tb = NULL;
1747 0124311e bellard
1748 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1749 84b7b8e7 bellard
        env->tlb_table[0][i].addr_read = -1;
1750 84b7b8e7 bellard
        env->tlb_table[0][i].addr_write = -1;
1751 84b7b8e7 bellard
        env->tlb_table[0][i].addr_code = -1;
1752 84b7b8e7 bellard
        env->tlb_table[1][i].addr_read = -1;
1753 84b7b8e7 bellard
        env->tlb_table[1][i].addr_write = -1;
1754 84b7b8e7 bellard
        env->tlb_table[1][i].addr_code = -1;
1755 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1756 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_read = -1;
1757 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_write = -1;
1758 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_code = -1;
1759 e37e6ee6 aurel32
#endif
1760 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1761 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_read = -1;
1762 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_write = -1;
1763 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_code = -1;
1764 6fa4cea9 j_mayer
#endif
1765 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1766 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_read = -1;
1767 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_write = -1;
1768 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_code = -1;
1769 6fa4cea9 j_mayer
#endif
1770 e37e6ee6 aurel32
1771 33417e70 bellard
    }
1772 9fa3e853 bellard
1773 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1774 9fa3e853 bellard
1775 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
1776 0a962c02 bellard
    if (env->kqemu_enabled) {
1777 0a962c02 bellard
        kqemu_flush(env, flush_global);
1778 0a962c02 bellard
    }
1779 0a962c02 bellard
#endif
1780 e3db7226 bellard
    tlb_flush_count++;
1781 33417e70 bellard
}
1782 33417e70 bellard
1783 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1784 61382a50 bellard
{
1785 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1786 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1787 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1788 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1789 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1790 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1791 84b7b8e7 bellard
        tlb_entry->addr_read = -1;
1792 84b7b8e7 bellard
        tlb_entry->addr_write = -1;
1793 84b7b8e7 bellard
        tlb_entry->addr_code = -1;
1794 84b7b8e7 bellard
    }
1795 61382a50 bellard
}
1796 61382a50 bellard
1797 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
1798 33417e70 bellard
{
1799 8a40a180 bellard
    int i;
1800 0124311e bellard
1801 9fa3e853 bellard
#if defined(DEBUG_TLB)
1802 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1803 9fa3e853 bellard
#endif
1804 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1805 0124311e bellard
       links while we are modifying them */
1806 0124311e bellard
    env->current_tb = NULL;
1807 61382a50 bellard
1808 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
1809 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1810 84b7b8e7 bellard
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1811 84b7b8e7 bellard
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1812 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1813 6fa4cea9 j_mayer
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1814 e37e6ee6 aurel32
#endif
1815 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1816 6fa4cea9 j_mayer
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1817 6fa4cea9 j_mayer
#endif
1818 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1819 e37e6ee6 aurel32
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1820 6fa4cea9 j_mayer
#endif
1821 0124311e bellard
1822 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
1823 9fa3e853 bellard
1824 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
1825 0a962c02 bellard
    if (env->kqemu_enabled) {
1826 0a962c02 bellard
        kqemu_flush_page(env, addr);
1827 0a962c02 bellard
    }
1828 0a962c02 bellard
#endif
1829 9fa3e853 bellard
}
1830 9fa3e853 bellard
1831 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1832 9fa3e853 bellard
   can be detected */
1833 6a00d601 bellard
static void tlb_protect_code(ram_addr_t ram_addr)
1834 9fa3e853 bellard
{
1835 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1836 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1837 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1838 9fa3e853 bellard
}
1839 9fa3e853 bellard
1840 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1841 3a7d929e bellard
   tested for self modifying code */
1842 5fafdf24 ths
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1843 3a7d929e bellard
                                    target_ulong vaddr)
1844 9fa3e853 bellard
{
1845 3a7d929e bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1846 1ccde1cb bellard
}
1847 1ccde1cb bellard
1848 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1849 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
1850 1ccde1cb bellard
{
1851 1ccde1cb bellard
    unsigned long addr;
1852 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1853 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1854 1ccde1cb bellard
        if ((addr - start) < length) {
1855 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1856 1ccde1cb bellard
        }
1857 1ccde1cb bellard
    }
1858 1ccde1cb bellard
}
1859 1ccde1cb bellard
1860 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
1861 3a7d929e bellard
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1862 0a962c02 bellard
                                     int dirty_flags)
1863 1ccde1cb bellard
{
1864 1ccde1cb bellard
    CPUState *env;
1865 4f2ac237 bellard
    unsigned long length, start1;
1866 0a962c02 bellard
    int i, mask, len;
1867 0a962c02 bellard
    uint8_t *p;
1868 1ccde1cb bellard
1869 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
1870 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
1871 1ccde1cb bellard
1872 1ccde1cb bellard
    length = end - start;
1873 1ccde1cb bellard
    if (length == 0)
1874 1ccde1cb bellard
        return;
1875 0a962c02 bellard
    len = length >> TARGET_PAGE_BITS;
1876 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
1877 6a00d601 bellard
    /* XXX: should not depend on cpu context */
1878 6a00d601 bellard
    env = first_cpu;
1879 3a7d929e bellard
    if (env->kqemu_enabled) {
1880 f23db169 bellard
        ram_addr_t addr;
1881 f23db169 bellard
        addr = start;
1882 f23db169 bellard
        for(i = 0; i < len; i++) {
1883 f23db169 bellard
            kqemu_set_notdirty(env, addr);
1884 f23db169 bellard
            addr += TARGET_PAGE_SIZE;
1885 f23db169 bellard
        }
1886 3a7d929e bellard
    }
1887 3a7d929e bellard
#endif
1888 f23db169 bellard
    mask = ~dirty_flags;
1889 f23db169 bellard
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1890 f23db169 bellard
    for(i = 0; i < len; i++)
1891 f23db169 bellard
        p[i] &= mask;
1892 f23db169 bellard
1893 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
1894 1ccde1cb bellard
       when accessing the range */
1895 5579c7f3 pbrook
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1896 5579c7f3 pbrook
    /* Check that we don't span multiple blocks - this breaks the
1897 5579c7f3 pbrook
       address comparisons below.  */
1898 5579c7f3 pbrook
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1899 5579c7f3 pbrook
            != (end - 1) - start) {
1900 5579c7f3 pbrook
        abort();
1901 5579c7f3 pbrook
    }
1902 5579c7f3 pbrook
1903 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1904 6a00d601 bellard
        for(i = 0; i < CPU_TLB_SIZE; i++)
1905 84b7b8e7 bellard
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1906 6a00d601 bellard
        for(i = 0; i < CPU_TLB_SIZE; i++)
1907 84b7b8e7 bellard
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1908 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1909 6fa4cea9 j_mayer
        for(i = 0; i < CPU_TLB_SIZE; i++)
1910 6fa4cea9 j_mayer
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1911 e37e6ee6 aurel32
#endif
1912 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1913 6fa4cea9 j_mayer
        for(i = 0; i < CPU_TLB_SIZE; i++)
1914 6fa4cea9 j_mayer
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1915 6fa4cea9 j_mayer
#endif
1916 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1917 e37e6ee6 aurel32
        for(i = 0; i < CPU_TLB_SIZE; i++)
1918 e37e6ee6 aurel32
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1919 6fa4cea9 j_mayer
#endif
1920 6a00d601 bellard
    }
1921 1ccde1cb bellard
}
1922 1ccde1cb bellard
1923 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
1924 74576198 aliguori
{
1925 74576198 aliguori
    in_migration = enable;
1926 74576198 aliguori
    return 0;
1927 74576198 aliguori
}
1928 74576198 aliguori
1929 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
1930 74576198 aliguori
{
1931 74576198 aliguori
    return in_migration;
1932 74576198 aliguori
}
1933 74576198 aliguori
1934 2bec46dc aliguori
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1935 2bec46dc aliguori
{
1936 2bec46dc aliguori
    if (kvm_enabled())
1937 2bec46dc aliguori
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1938 2bec46dc aliguori
}
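
/* Hedged sketch of the dirty-page scan used by live-migration-style code:
   enable tracking, sync the bitmap (from KVM if enabled), send the dirty
   pages, then clear their dirty bits.  cpu_physical_memory_get_dirty() and
   MIGRATION_DIRTY_FLAG are assumed from cpu-all.h; send_page() is a
   hypothetical callback. */
static void example_sync_ram(ram_addr_t size, void (*send_page)(ram_addr_t))
{
    ram_addr_t addr;

    cpu_physical_memory_set_dirty_tracking(1);
    cpu_physical_sync_dirty_bitmap(0, size);
    for (addr = 0; addr < size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
            send_page(addr);
            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                            MIGRATION_DIRTY_FLAG);
        }
    }
}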
1939 2bec46dc aliguori
1940 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1941 3a7d929e bellard
{
1942 3a7d929e bellard
    ram_addr_t ram_addr;
1943 5579c7f3 pbrook
    void *p;
1944 3a7d929e bellard
1945 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1946 5579c7f3 pbrook
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1947 5579c7f3 pbrook
            + tlb_entry->addend);
1948 5579c7f3 pbrook
        ram_addr = qemu_ram_addr_from_host(p);
1949 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1950 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1951 3a7d929e bellard
        }
1952 3a7d929e bellard
    }
1953 3a7d929e bellard
}
1954 3a7d929e bellard
1955 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
1956 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
1957 3a7d929e bellard
{
1958 3a7d929e bellard
    int i;
1959 3a7d929e bellard
    for(i = 0; i < CPU_TLB_SIZE; i++)
1960 84b7b8e7 bellard
        tlb_update_dirty(&env->tlb_table[0][i]);
1961 3a7d929e bellard
    for(i = 0; i < CPU_TLB_SIZE; i++)
1962 84b7b8e7 bellard
        tlb_update_dirty(&env->tlb_table[1][i]);
1963 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1964 6fa4cea9 j_mayer
    for(i = 0; i < CPU_TLB_SIZE; i++)
1965 6fa4cea9 j_mayer
        tlb_update_dirty(&env->tlb_table[2][i]);
1966 e37e6ee6 aurel32
#endif
1967 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1968 6fa4cea9 j_mayer
    for(i = 0; i < CPU_TLB_SIZE; i++)
1969 6fa4cea9 j_mayer
        tlb_update_dirty(&env->tlb_table[3][i]);
1970 6fa4cea9 j_mayer
#endif
1971 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1972 e37e6ee6 aurel32
    for(i = 0; i < CPU_TLB_SIZE; i++)
1973 e37e6ee6 aurel32
        tlb_update_dirty(&env->tlb_table[4][i]);
1974 6fa4cea9 j_mayer
#endif
1975 3a7d929e bellard
}
1976 3a7d929e bellard
1977 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1978 1ccde1cb bellard
{
1979 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1980 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
1981 1ccde1cb bellard
}
1982 1ccde1cb bellard
1983 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
1984 0f459d16 pbrook
   so that it is no longer dirty */
1985 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1986 1ccde1cb bellard
{
1987 1ccde1cb bellard
    int i;
1988 1ccde1cb bellard
1989 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
1990 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1991 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1992 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1993 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1994 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1995 e37e6ee6 aurel32
#endif
1996 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1997 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1998 6fa4cea9 j_mayer
#endif
1999 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
2000 e37e6ee6 aurel32
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
2001 6fa4cea9 j_mayer
#endif
2002 9fa3e853 bellard
}
2003 9fa3e853 bellard
2004 59817ccb bellard
/* add a new TLB entry. At most one entry for a given virtual address
2005 59817ccb bellard
   is permitted. Return 0 if OK or 2 if the page could not be mapped
2006 59817ccb bellard
   (can only happen in non SOFTMMU mode for I/O pages or pages
2007 59817ccb bellard
   conflicting with the host address space). */
2008 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2009 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
2010 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2011 9fa3e853 bellard
{
2012 92e873b9 bellard
    PhysPageDesc *p;
2013 4f2ac237 bellard
    unsigned long pd;
2014 9fa3e853 bellard
    unsigned int index;
2015 4f2ac237 bellard
    target_ulong address;
2016 0f459d16 pbrook
    target_ulong code_address;
2017 108c49b8 bellard
    target_phys_addr_t addend;
2018 9fa3e853 bellard
    int ret;
2019 84b7b8e7 bellard
    CPUTLBEntry *te;
2020 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2021 0f459d16 pbrook
    target_phys_addr_t iotlb;
2022 9fa3e853 bellard
2023 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2024 9fa3e853 bellard
    if (!p) {
2025 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
2026 9fa3e853 bellard
    } else {
2027 9fa3e853 bellard
        pd = p->phys_offset;
2028 9fa3e853 bellard
    }
2029 9fa3e853 bellard
#if defined(DEBUG_TLB)
2030 6ebbf390 j_mayer
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2031 6ebbf390 j_mayer
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2032 9fa3e853 bellard
#endif
2033 9fa3e853 bellard
2034 9fa3e853 bellard
    ret = 0;
2035 0f459d16 pbrook
    address = vaddr;
2036 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2037 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2038 0f459d16 pbrook
        address |= TLB_MMIO;
2039 0f459d16 pbrook
    }
2040 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2041 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2042 0f459d16 pbrook
        /* Normal RAM.  */
2043 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2044 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2045 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
2046 0f459d16 pbrook
        else
2047 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
2048 0f459d16 pbrook
    } else {
2049 0f459d16 pbrook
        /* IO handlers are currently passed a physical address.
2050 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2051 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2052 0f459d16 pbrook
           and avoid full address decoding in every device.
2053 0f459d16 pbrook
           We can't use the high bits of pd for this because
2054 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2055 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2056 8da3ff18 pbrook
        if (p) {
2057 8da3ff18 pbrook
            iotlb += p->region_offset;
2058 8da3ff18 pbrook
        } else {
2059 8da3ff18 pbrook
            iotlb += paddr;
2060 8da3ff18 pbrook
        }
2061 0f459d16 pbrook
    }
2062 0f459d16 pbrook
2063 0f459d16 pbrook
    code_address = address;
2064 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2065 0f459d16 pbrook
       watchpoint trap routines.  */
2066 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2067 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2068 0f459d16 pbrook
            iotlb = io_mem_watch + paddr;
2069 0f459d16 pbrook
            /* TODO: The memory case can be optimized by not trapping
2070 0f459d16 pbrook
               reads of pages with a write breakpoint.  */
2071 0f459d16 pbrook
            address |= TLB_MMIO;
2072 6658ffb8 pbrook
        }
2073 0f459d16 pbrook
    }
2074 d79acba4 balrog
2075 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2076 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2077 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2078 0f459d16 pbrook
    te->addend = addend - vaddr;
2079 0f459d16 pbrook
    if (prot & PAGE_READ) {
2080 0f459d16 pbrook
        te->addr_read = address;
2081 0f459d16 pbrook
    } else {
2082 0f459d16 pbrook
        te->addr_read = -1;
2083 0f459d16 pbrook
    }
2084 5c751e99 edgar_igl
2085 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2086 0f459d16 pbrook
        te->addr_code = code_address;
2087 0f459d16 pbrook
    } else {
2088 0f459d16 pbrook
        te->addr_code = -1;
2089 0f459d16 pbrook
    }
2090 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2091 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2092 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2093 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2094 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2095 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2096 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2097 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2098 9fa3e853 bellard
        } else {
2099 0f459d16 pbrook
            te->addr_write = address;
2100 9fa3e853 bellard
        }
2101 0f459d16 pbrook
    } else {
2102 0f459d16 pbrook
        te->addr_write = -1;
2103 9fa3e853 bellard
    }
2104 9fa3e853 bellard
    return ret;
2105 9fa3e853 bellard
}
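
/* Hedged sketch (not real target code): the shape of a target's MMU fault
   handler, which resolves vaddr to paddr/prot and installs the mapping.
   The identity translation below is a stand-in for a real page-table walk. */
static int example_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                                    int mmu_idx, int is_softmmu)
{
    target_phys_addr_t paddr = vaddr & TARGET_PAGE_MASK;    /* fake walk */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK, paddr, prot,
                             mmu_idx, is_softmmu);
}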
2106 9fa3e853 bellard
2107 0124311e bellard
#else
2108 0124311e bellard
2109 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2110 0124311e bellard
{
2111 0124311e bellard
}
2112 0124311e bellard
2113 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2114 0124311e bellard
{
2115 0124311e bellard
}
2116 0124311e bellard
2117 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2118 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
2119 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2120 9fa3e853 bellard
{
2121 9fa3e853 bellard
    return 0;
2122 9fa3e853 bellard
}
2123 0124311e bellard
2124 9fa3e853 bellard
/* dump memory mappings */
2125 9fa3e853 bellard
void page_dump(FILE *f)
2126 33417e70 bellard
{
2127 9fa3e853 bellard
    unsigned long start, end;
2128 9fa3e853 bellard
    int i, j, prot, prot1;
2129 9fa3e853 bellard
    PageDesc *p;
2130 33417e70 bellard
2131 9fa3e853 bellard
    fprintf(f, "%-8s %-8s %-8s %s\n",
2132 9fa3e853 bellard
            "start", "end", "size", "prot");
2133 9fa3e853 bellard
    start = -1;
2134 9fa3e853 bellard
    end = -1;
2135 9fa3e853 bellard
    prot = 0;
2136 9fa3e853 bellard
    for(i = 0; i <= L1_SIZE; i++) {
2137 9fa3e853 bellard
        if (i < L1_SIZE)
2138 9fa3e853 bellard
            p = l1_map[i];
2139 9fa3e853 bellard
        else
2140 9fa3e853 bellard
            p = NULL;
2141 9fa3e853 bellard
        for(j = 0;j < L2_SIZE; j++) {
2142 9fa3e853 bellard
            if (!p)
2143 9fa3e853 bellard
                prot1 = 0;
2144 9fa3e853 bellard
            else
2145 9fa3e853 bellard
                prot1 = p[j].flags;
2146 9fa3e853 bellard
            if (prot1 != prot) {
2147 9fa3e853 bellard
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2148 9fa3e853 bellard
                if (start != -1) {
2149 9fa3e853 bellard
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2150 5fafdf24 ths
                            start, end, end - start,
2151 9fa3e853 bellard
                            prot & PAGE_READ ? 'r' : '-',
2152 9fa3e853 bellard
                            prot & PAGE_WRITE ? 'w' : '-',
2153 9fa3e853 bellard
                            prot & PAGE_EXEC ? 'x' : '-');
2154 9fa3e853 bellard
                }
2155 9fa3e853 bellard
                if (prot1 != 0)
2156 9fa3e853 bellard
                    start = end;
2157 9fa3e853 bellard
                else
2158 9fa3e853 bellard
                    start = -1;
2159 9fa3e853 bellard
                prot = prot1;
2160 9fa3e853 bellard
            }
2161 9fa3e853 bellard
            if (!p)
2162 9fa3e853 bellard
                break;
2163 9fa3e853 bellard
        }
2164 33417e70 bellard
    }
2165 33417e70 bellard
}
2166 33417e70 bellard
2167 53a5960a pbrook
int page_get_flags(target_ulong address)
2168 33417e70 bellard
{
2169 9fa3e853 bellard
    PageDesc *p;
2170 9fa3e853 bellard
2171 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2172 33417e70 bellard
    if (!p)
2173 9fa3e853 bellard
        return 0;
2174 9fa3e853 bellard
    return p->flags;
2175 9fa3e853 bellard
}
2176 9fa3e853 bellard
2177 9fa3e853 bellard
/* modify the flags of a page and invalidate the code if
2178 9fa3e853 bellard
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
2179 9fa3e853 bellard
   depending on PAGE_WRITE */
2180 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2181 9fa3e853 bellard
{
2182 9fa3e853 bellard
    PageDesc *p;
2183 53a5960a pbrook
    target_ulong addr;
2184 9fa3e853 bellard
2185 c8a706fe pbrook
    /* mmap_lock should already be held.  */
2186 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2187 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2188 9fa3e853 bellard
    if (flags & PAGE_WRITE)
2189 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2190 9fa3e853 bellard
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2191 9fa3e853 bellard
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2192 17e2377a pbrook
        /* We may be called for host regions that are outside guest
2193 17e2377a pbrook
           address space.  */
2194 17e2377a pbrook
        if (!p)
2195 17e2377a pbrook
            return;
2196 9fa3e853 bellard
        /* if the write protection is set, then we invalidate the code
2197 9fa3e853 bellard
           inside */
2198 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2199 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2200 9fa3e853 bellard
            p->first_tb) {
2201 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2202 9fa3e853 bellard
        }
2203 9fa3e853 bellard
        p->flags = flags;
2204 9fa3e853 bellard
    }
2205 33417e70 bellard
}
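
/* Hedged sketch (user-mode emulation): what a target_mmap()-style caller
   does after creating a new anonymous guest mapping, so that later
   page_check_range() calls accept accesses to it. */
static void example_register_mapping(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}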
2206 33417e70 bellard
2207 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2208 3d97b40b ths
{
2209 3d97b40b ths
    PageDesc *p;
2210 3d97b40b ths
    target_ulong end;
2211 3d97b40b ths
    target_ulong addr;
2212 3d97b40b ths
2213 55f280c9 balrog
    if (start + len < start)
2214 55f280c9 balrog
        /* we've wrapped around */
2215 55f280c9 balrog
        return -1;
2216 55f280c9 balrog
2217 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2218 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2219 3d97b40b ths
2220 3d97b40b ths
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2221 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2222 3d97b40b ths
        if( !p )
2223 3d97b40b ths
            return -1;
2224 3d97b40b ths
        if( !(p->flags & PAGE_VALID) )
2225 3d97b40b ths
            return -1;
2226 3d97b40b ths
2227 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2228 3d97b40b ths
            return -1;
2229 dae3270c bellard
        if (flags & PAGE_WRITE) {
2230 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2231 dae3270c bellard
                return -1;
2232 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2233 dae3270c bellard
               contains translated code */
2234 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2235 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2236 dae3270c bellard
                    return -1;
2237 dae3270c bellard
            }
2238 dae3270c bellard
            return 0;
2239 dae3270c bellard
        }
2240 3d97b40b ths
    }
2241 3d97b40b ths
    return 0;
2242 3d97b40b ths
}
2243 3d97b40b ths
2244 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2245 9fa3e853 bellard
   page. Return TRUE if the fault was successfully handled. */
2246 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2247 9fa3e853 bellard
{
2248 9fa3e853 bellard
    unsigned int page_index, prot, pindex;
2249 9fa3e853 bellard
    PageDesc *p, *p1;
2250 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2251 9fa3e853 bellard
2252 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2253 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2254 c8a706fe pbrook
       practice it seems to be ok.  */
2255 c8a706fe pbrook
    mmap_lock();
2256 c8a706fe pbrook
2257 83fb7adf bellard
    host_start = address & qemu_host_page_mask;
2258 9fa3e853 bellard
    page_index = host_start >> TARGET_PAGE_BITS;
2259 9fa3e853 bellard
    p1 = page_find(page_index);
2260 c8a706fe pbrook
    if (!p1) {
2261 c8a706fe pbrook
        mmap_unlock();
2262 9fa3e853 bellard
        return 0;
2263 c8a706fe pbrook
    }
2264 83fb7adf bellard
    host_end = host_start + qemu_host_page_size;
2265 9fa3e853 bellard
    p = p1;
2266 9fa3e853 bellard
    prot = 0;
2267 9fa3e853 bellard
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2268 9fa3e853 bellard
        prot |= p->flags;
2269 9fa3e853 bellard
        p++;
2270 9fa3e853 bellard
    }
2271 9fa3e853 bellard
    /* if the page was really writable, then we change its
2272 9fa3e853 bellard
       protection back to writable */
2273 9fa3e853 bellard
    if (prot & PAGE_WRITE_ORG) {
2274 9fa3e853 bellard
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2275 9fa3e853 bellard
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2276 5fafdf24 ths
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2277 9fa3e853 bellard
                     (prot & PAGE_BITS) | PAGE_WRITE);
2278 9fa3e853 bellard
            p1[pindex].flags |= PAGE_WRITE;
2279 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2280 9fa3e853 bellard
               the corresponding translated code. */
2281 d720b93d bellard
            tb_invalidate_phys_page(address, pc, puc);
2282 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2283 9fa3e853 bellard
            tb_invalidate_check(address);
2284 9fa3e853 bellard
#endif
2285 c8a706fe pbrook
            mmap_unlock();
2286 9fa3e853 bellard
            return 1;
2287 9fa3e853 bellard
        }
2288 9fa3e853 bellard
    }
2289 c8a706fe pbrook
    mmap_unlock();
2290 9fa3e853 bellard
    return 0;
2291 9fa3e853 bellard
}
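
/* Illustrative sketch (not part of exec.c): the caller side of
   page_unprotect().  In user-mode emulation the host SEGV handler first
   gives the fault address to page_unprotect(); only if that returns 0 is
   the fault forwarded to the guest.  The handler name and its arguments
   are hypothetical.  */
#if 0
static int example_handle_host_segv(unsigned long host_addr, unsigned long pc,
                                    void *puc)
{
    /* The fault may simply mean the page was write-protected because it
       contains translated code; page_unprotect() then invalidates the TBs,
       restores PROT_WRITE and reports the fault as handled.  */
    if (page_unprotect(h2g(host_addr), pc, puc))
        return 1;       /* retry the faulting instruction */
    return 0;           /* genuine guest fault: deliver SIGSEGV */
}
#endif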

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef CONFIG_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
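
/* Illustrative sketch (not part of exec.c): the usual caller of the function
   above is board setup code.  cpu_register_physical_memory() is the wrapper
   that passes region_offset == 0.  The size and base address are made up; an
   MMIO example is sketched further down next to cpu_register_io_memory().  */
#if 0
static void example_board_map_ram(void)
{
    /* 64MB of RAM at physical address 0: allocate the backing store and
       map it into the guest physical address space.  */
    ram_addr_t ram_offset = qemu_ram_alloc(64 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 64 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
}
#endif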

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

#ifdef CONFIG_KQEMU
/* XXX: better than nothing */
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
        abort();
    }
    addr = last_ram_offset;
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
    return addr;
}
#endif

ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return kqemu_ram_alloc(size);
    }
#endif

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = qemu_vmalloc(size);
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return kqemu_phys_ram_base + addr;
    }
#endif

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
          prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
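
/* Illustrative sketch (not part of exec.c): the intended pattern for
   qemu_ram_alloc()/qemu_get_ram_ptr() as described in the comment above --
   a device mapping its own video RAM and keeping a host pointer to it.
   The base address, size and function name are made up.  */
#if 0
static uint8_t *example_init_vram(target_phys_addr_t base, ram_addr_t size)
{
    ram_addr_t vram_offset;

    vram_offset = qemu_ram_alloc(size);
    cpu_register_physical_memory(base, size, vram_offset | IO_MEM_RAM);

    /* Safe here: the device owns this block and never reads past its end.
       For guest-controlled DMA, cpu_physical_memory_map()/rw() should be
       used instead, as the comment above says.  */
    return qemu_get_ram_ptr(vram_offset);
}
#endif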

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;
    uint8_t *host = ptr;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return host - kqemu_phys_ram_base;
    }
#endif

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        if (prev)
          prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
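
/* Illustrative sketch (not part of exec.c): the consumer side of the dirty
   bits that the notdirty handlers above set.  A display device scans its
   framebuffer pages and redraws only those marked dirty, then clears the
   flag.  VGA_DIRTY_FLAG and the two helpers are assumed to come from
   cpu-all.h of the same code base; the function itself is hypothetical.  */
#if 0
static void example_update_display(ram_addr_t fb_offset, ram_addr_t fb_size)
{
    ram_addr_t page;

    for (page = fb_offset; page < fb_offset + fb_size;
         page += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(page, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    /* rearm dirty logging for the whole framebuffer */
    cpu_physical_memory_reset_dirty(fb_offset, fb_offset + fb_size,
                                    VGA_DIRTY_FLAG);
}
#endif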

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
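
/* Illustrative sketch (not part of exec.c): how a watchpoint that ends up in
   check_watchpoint() above is typically installed, e.g. by the gdbstub.  The
   exact signature of cpu_watchpoint_insert() is assumed from the same code
   base; error handling is omitted.  */
#if 0
static void example_install_write_watchpoint(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    /* Watch 4 bytes for writes; the TLB tricks described below route any
       access to the page through the watch_mem_* handlers, which call
       check_watchpoint() before doing the real access.  */
    cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp);
}
#endif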

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                         region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        /* alloc dirty bits array */
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    }
#endif
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
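
/* Illustrative sketch (not part of exec.c): the function tables a device
   passes to cpu_register_io_memory().  Index 0/1/2 select byte/word/dword
   accessors; leaving an entry NULL marks that width as unsupported, which
   turns the region into a "subwidth" one.  The example accessors, the state
   pointer and the addresses are hypothetical.  */
#if 0
static CPUReadMemoryFunc *example_mmio_read[3] = {
    example_mmio_readb,     /* 8-bit reads  */
    NULL,                   /* 16-bit reads not handled by this device */
    example_mmio_readl,     /* 32-bit reads */
};

static CPUWriteMemoryFunc *example_mmio_write[3] = {
    example_mmio_writeb,
    NULL,
    example_mmio_writel,
};

static void example_register_mmio(void *example_state)
{
    /* io_index == 0 requests a fresh I/O zone; the returned value is then
       handed to cpu_register_physical_memory() to map a 4KB MMIO page.  */
    int io_index = cpu_register_io_memory(0, example_mmio_read,
                                          example_mmio_write, example_state);
    cpu_register_physical_memory(0x10000000, 0x1000, io_index);
}
#endif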

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
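
/* Illustrative sketch (not part of exec.c): how a device model performs DMA
   through the accessor above -- copy a buffer into guest memory, then read a
   32-bit status word back.  The guest addresses, the layout and the function
   name are hypothetical.  */
#if 0
static void example_dma_transfer(target_phys_addr_t desc_addr,
                                 uint8_t *data, int len)
{
    uint32_t status;

    /* write the payload into guest RAM (or device MMIO, transparently) */
    cpu_physical_memory_rw(desc_addr, data, len, 1);

    /* read a 32-bit status field located right after the payload */
    cpu_physical_memory_rw(desc_addr + len, (uint8_t *)&status,
                           sizeof(status), 0);
}
#endif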

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
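
/* Illustrative sketch (not part of exec.c): firmware loading with the helper
   above -- unlike cpu_physical_memory_rw() it also writes into pages that
   were registered as ROM.  The reset address, buffer and function name are
   made up.  */
#if 0
static void example_load_firmware(const uint8_t *blob, int size)
{
    /* copy the boot ROM image to its reset address */
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}
#endif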

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}
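
/* Editor's illustrative sketch (not part of the original exec.c): a caller
 * whose cpu_physical_memory_map() attempt returned NULL (the single bounce
 * buffer was busy) can register a map client; cpu_notify_map_clients() runs
 * every registered callback and empties the list once the bounce buffer is
 * released.  "MyDeviceState", "map_client" and "my_device_start_dma" are
 * hypothetical names.
 */
#if 0
static void my_device_map_retry(void *opaque)
{
    MyDeviceState *s = opaque;  /* hypothetical device state */

    /* The bounce buffer is free again: retry the deferred transfer.  If the
       retry fails once more, the callback must re-register itself. */
    my_device_start_dma(s);
}

static void my_device_defer_dma(MyDeviceState *s)
{
    s->map_client = cpu_register_map_client(s, my_device_map_retry);
}
#endif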

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
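
/* Editor's illustrative sketch (not part of the original exec.c): the usual
 * zero-copy DMA pattern is map -> access -> unmap.  The mapping may be
 * shorter than requested (check *plen) and may fail while the bounce buffer
 * is in use, in which case the map-client mechanism above can be used to
 * retry.  "fill_buffer_from_device" is a hypothetical callback.
 */
#if 0
static int example_dma_to_guest(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *buf;

    buf = cpu_physical_memory_map(addr, &plen, 1);      /* is_write = 1 */
    if (!buf)
        return -1;                      /* bounce buffer busy: defer and retry */
    fill_buffer_from_device(buf, plen); /* hypothetical; plen may be < len */
    cpu_physical_memory_unmap(buf, plen, 1, plen);      /* marks RAM dirty */
    return 0;
}
#endif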

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
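
/* Editor's illustrative sketch (not part of the original exec.c): the
 * ld*_phys() helpers read small values from guest physical memory, handling
 * guest endianness, without an explicit cpu_physical_memory_rw() call;
 * ldl_phys()/ldq_phys() additionally require a naturally aligned address.
 * "desc_addr" is a hypothetical DMA descriptor address.
 */
#if 0
static uint32_t example_read_descriptor_word(target_phys_addr_t desc_addr)
{
    /* desc_addr must be 4-byte aligned for ldl_phys(). */
    return ldl_phys(desc_addr);
}
#endif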

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
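
/* Editor's illustrative sketch (not part of the original exec.c): the
 * _notdirty store is intended for cases such as a target MMU helper setting
 * accessed/dirty bits in a guest page table entry, where the update itself
 * must not mark the RAM page dirty or invalidate translated code.
 * "pte_addr" and "PTE_ACCESSED" are hypothetical.
 */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED); /* PTE_ACCESSED: hypothetical flag */
}
#endif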

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
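
/* Editor's illustrative sketch (not part of the original exec.c): debuggers
 * such as the gdb stub use cpu_memory_rw_debug(), which translates guest
 * virtual addresses via cpu_get_phys_page_debug() and, for writes, goes
 * through cpu_physical_memory_write_rom() so that breakpoints can be planted
 * even in ROM.
 */
#if 0
static int example_peek_guest_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;          /* no physical page mapped at vaddr */
    *out = ldl_p(buf);      /* decode in guest byte order */
    return 0;
}
#endif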

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
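
/* Editor's note (not part of the original exec.c): dump_exec_info() only
 * needs an fprintf-compatible callback, so a quick way to inspect the
 * translation cache while debugging the translator is simply:
 *
 *     dump_exec_info(stderr, fprintf);
 *
 * In a running QEMU the same information is normally exposed through the
 * monitor's "info jit" command.
 */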

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif