root / exec.c @ b0a46a33

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 54936004 bellard
 * License along with this library; if not, write to the Free Software
18 fad6cb1a aurel32
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19 54936004 bellard
 */
20 67b915a5 bellard
#include "config.h"
21 d5a8f07c bellard
#ifdef _WIN32
22 d5a8f07c bellard
#include <windows.h>
23 d5a8f07c bellard
#else
24 a98d49b1 bellard
#include <sys/types.h>
25 d5a8f07c bellard
#include <sys/mman.h>
26 d5a8f07c bellard
#endif
27 54936004 bellard
#include <stdlib.h>
28 54936004 bellard
#include <stdio.h>
29 54936004 bellard
#include <stdarg.h>
30 54936004 bellard
#include <string.h>
31 54936004 bellard
#include <errno.h>
32 54936004 bellard
#include <unistd.h>
33 54936004 bellard
#include <inttypes.h>
34 54936004 bellard
35 6180a181 bellard
#include "cpu.h"
36 6180a181 bellard
#include "exec-all.h"
37 ca10f867 aurel32
#include "qemu-common.h"
38 b67d9a52 bellard
#include "tcg.h"
39 b3c7724c pbrook
#include "hw/hw.h"
40 74576198 aliguori
#include "osdep.h"
41 7ba1e619 aliguori
#include "kvm.h"
42 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
43 53a5960a pbrook
#include <qemu.h>
44 53a5960a pbrook
#endif
45 54936004 bellard
46 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
47 66e85a21 bellard
//#define DEBUG_FLUSH
48 9fa3e853 bellard
//#define DEBUG_TLB
49 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
50 fd6ce8f6 bellard
51 fd6ce8f6 bellard
/* make various TB consistency checks */
52 5fafdf24 ths
//#define DEBUG_TB_CHECK
53 5fafdf24 ths
//#define DEBUG_TLB_CHECK
54 fd6ce8f6 bellard
55 1196be37 ths
//#define DEBUG_IOPORT
56 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
57 1196be37 ths
58 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
59 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
60 99773bd4 pbrook
#undef DEBUG_TB_CHECK
61 99773bd4 pbrook
#endif
62 99773bd4 pbrook
63 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
64 9fa3e853 bellard
65 108c49b8 bellard
#if defined(TARGET_SPARC64)
66 108c49b8 bellard
#define TARGET_PHYS_ADDR_SPACE_BITS 41
67 5dcb6b91 blueswir1
#elif defined(TARGET_SPARC)
68 5dcb6b91 blueswir1
#define TARGET_PHYS_ADDR_SPACE_BITS 36
69 bedb69ea j_mayer
#elif defined(TARGET_ALPHA)
70 bedb69ea j_mayer
#define TARGET_PHYS_ADDR_SPACE_BITS 42
71 bedb69ea j_mayer
#define TARGET_VIRT_ADDR_SPACE_BITS 42
72 108c49b8 bellard
#elif defined(TARGET_PPC64)
73 108c49b8 bellard
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74 640f42e4 blueswir1
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
75 00f82b8a aurel32
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76 640f42e4 blueswir1
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
77 00f82b8a aurel32
#define TARGET_PHYS_ADDR_SPACE_BITS 36
78 108c49b8 bellard
#else
79 108c49b8 bellard
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 108c49b8 bellard
#define TARGET_PHYS_ADDR_SPACE_BITS 32
81 108c49b8 bellard
#endif
82 108c49b8 bellard
83 bdaf78e0 blueswir1
static TranslationBlock *tbs;
84 26a5f13b bellard
int code_gen_max_blocks;
85 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86 bdaf78e0 blueswir1
static int nb_tbs;
87 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
88 eb51d102 bellard
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 fd6ce8f6 bellard
90 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
91 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
92 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
93 d03d860b blueswir1
 section close to the code segment. */
94 d03d860b blueswir1
#define code_gen_section                                \
95 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
96 d03d860b blueswir1
    __attribute__((aligned (32)))
97 d03d860b blueswir1
#else
98 d03d860b blueswir1
#define code_gen_section                                \
99 d03d860b blueswir1
    __attribute__((aligned (32)))
100 d03d860b blueswir1
#endif
101 d03d860b blueswir1
102 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
103 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
104 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
105 26a5f13b bellard
/* threshold to flush the translated code buffer */
106 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
107 fd6ce8f6 bellard
uint8_t *code_gen_ptr;
108 fd6ce8f6 bellard
109 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
110 9fa3e853 bellard
int phys_ram_fd;
111 1ccde1cb bellard
uint8_t *phys_ram_dirty;
112 74576198 aliguori
static int in_migration;
113 94a6b54f pbrook
114 94a6b54f pbrook
typedef struct RAMBlock {
115 94a6b54f pbrook
    uint8_t *host;
116 94a6b54f pbrook
    ram_addr_t offset;
117 94a6b54f pbrook
    ram_addr_t length;
118 94a6b54f pbrook
    struct RAMBlock *next;
119 94a6b54f pbrook
} RAMBlock;
120 94a6b54f pbrook
121 94a6b54f pbrook
static RAMBlock *ram_blocks;
122 94a6b54f pbrook
/* TODO: Once we implement (and use) ram deallocation (e.g. for hotplug),
123 ccbb4d44 Stuart Brady
   we can no longer assume contiguous ram offsets, and external uses
124 94a6b54f pbrook
   of this variable will break.  */
125 94a6b54f pbrook
ram_addr_t last_ram_offset;
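/* A minimal sketch (not part of the original file, name is hypothetical) of
   how a ram_addr_t offset can be resolved back to a host pointer by walking
   the ram_blocks list above; the RAM accessors elsewhere in QEMU perform a
   lookup of this kind. */
#if 0
static uint8_t *example_ram_offset_to_host(ram_addr_t addr)
{
    RAMBlock *block;

    for (block = ram_blocks; block != NULL; block = block->next) {
        if (addr >= block->offset && addr < block->offset + block->length)
            return block->host + (addr - block->offset);
    }
    return NULL; /* offset not covered by any registered RAM block */
}
#endif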
126 e2eef170 pbrook
#endif
127 9fa3e853 bellard
128 6a00d601 bellard
CPUState *first_cpu;
129 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
130 6a00d601 bellard
   cpu_exec() */
131 5fafdf24 ths
CPUState *cpu_single_env;
132 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
133 bf20dc07 ths
   1 = Precise instruction counting.
134 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
135 2e70f6ef pbrook
int use_icount = 0;
136 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
137 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
138 2e70f6ef pbrook
int64_t qemu_icount;
139 6a00d601 bellard
140 54936004 bellard
typedef struct PageDesc {
141 92e873b9 bellard
    /* list of TBs intersecting this ram page */
142 fd6ce8f6 bellard
    TranslationBlock *first_tb;
143 9fa3e853 bellard
    /* in order to optimize self-modifying code, we count the number
144 9fa3e853 bellard
       of write accesses to a given page; past a threshold a code bitmap is built */
145 9fa3e853 bellard
    unsigned int code_write_count;
146 9fa3e853 bellard
    uint8_t *code_bitmap;
147 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
148 9fa3e853 bellard
    unsigned long flags;
149 9fa3e853 bellard
#endif
150 54936004 bellard
} PageDesc;
151 54936004 bellard
152 92e873b9 bellard
typedef struct PhysPageDesc {
153 0f459d16 pbrook
    /* offset in host memory of the page + io_index in the low bits */
154 00f82b8a aurel32
    ram_addr_t phys_offset;
155 8da3ff18 pbrook
    ram_addr_t region_offset;
156 92e873b9 bellard
} PhysPageDesc;
157 92e873b9 bellard
158 54936004 bellard
#define L2_BITS 10
159 bedb69ea j_mayer
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
160 bedb69ea j_mayer
/* XXX: this is a temporary hack for the alpha target.
161 bedb69ea j_mayer
 *      In the future, this is to be replaced by a multi-level table
162 bedb69ea j_mayer
 *      to actually be able to handle the complete 64-bit address space.
163 bedb69ea j_mayer
 */
164 bedb69ea j_mayer
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
165 bedb69ea j_mayer
#else
166 03875444 aurel32
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
167 bedb69ea j_mayer
#endif
168 54936004 bellard
169 54936004 bellard
#define L1_SIZE (1 << L1_BITS)
170 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
171 54936004 bellard
172 83fb7adf bellard
unsigned long qemu_real_host_page_size;
173 83fb7adf bellard
unsigned long qemu_host_page_bits;
174 83fb7adf bellard
unsigned long qemu_host_page_size;
175 83fb7adf bellard
unsigned long qemu_host_page_mask;
176 54936004 bellard
177 92e873b9 bellard
/* XXX: for system emulation, it could just be an array */
178 54936004 bellard
static PageDesc *l1_map[L1_SIZE];
179 bdaf78e0 blueswir1
static PhysPageDesc **l1_phys_map;
180 54936004 bellard
181 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
182 e2eef170 pbrook
static void io_mem_init(void);
183 e2eef170 pbrook
184 33417e70 bellard
/* io memory support */
185 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
186 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
187 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
188 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
189 6658ffb8 pbrook
static int io_mem_watch;
190 6658ffb8 pbrook
#endif
191 33417e70 bellard
192 34865134 bellard
/* log support */
193 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
194 34865134 bellard
FILE *logfile;
195 34865134 bellard
int loglevel;
196 e735b91c pbrook
static int log_append = 0;
197 34865134 bellard
198 e3db7226 bellard
/* statistics */
199 e3db7226 bellard
static int tlb_flush_count;
200 e3db7226 bellard
static int tb_flush_count;
201 e3db7226 bellard
static int tb_phys_invalidate_count;
202 e3db7226 bellard
203 db7b5426 blueswir1
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
204 db7b5426 blueswir1
typedef struct subpage_t {
205 db7b5426 blueswir1
    target_phys_addr_t base;
206 3ee89922 blueswir1
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
207 3ee89922 blueswir1
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
208 3ee89922 blueswir1
    void *opaque[TARGET_PAGE_SIZE][2][4];
209 8da3ff18 pbrook
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
210 db7b5426 blueswir1
} subpage_t;
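/* Worked example (added for clarity; the page size is only an assumption):
   with 4 KiB target pages, SUBPAGE_IDX(0x12345) == 0x345, i.e. the byte
   offset of the access inside the page, which indexes the per-byte handler
   tables of subpage_t above. */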
211 db7b5426 blueswir1
212 7cb69cae bellard
#ifdef _WIN32
213 7cb69cae bellard
static void map_exec(void *addr, long size)
214 7cb69cae bellard
{
215 7cb69cae bellard
    DWORD old_protect;
216 7cb69cae bellard
    VirtualProtect(addr, size,
217 7cb69cae bellard
                   PAGE_EXECUTE_READWRITE, &old_protect);
218 7cb69cae bellard
    
219 7cb69cae bellard
}
220 7cb69cae bellard
#else
221 7cb69cae bellard
static void map_exec(void *addr, long size)
222 7cb69cae bellard
{
223 4369415f bellard
    unsigned long start, end, page_size;
224 7cb69cae bellard
    
225 4369415f bellard
    page_size = getpagesize();
226 7cb69cae bellard
    start = (unsigned long)addr;
227 4369415f bellard
    start &= ~(page_size - 1);
228 7cb69cae bellard
    
229 7cb69cae bellard
    end = (unsigned long)addr + size;
230 4369415f bellard
    end += page_size - 1;
231 4369415f bellard
    end &= ~(page_size - 1);
232 7cb69cae bellard
    
233 7cb69cae bellard
    mprotect((void *)start, end - start,
234 7cb69cae bellard
             PROT_READ | PROT_WRITE | PROT_EXEC);
235 7cb69cae bellard
}
236 7cb69cae bellard
#endif
237 7cb69cae bellard
238 b346ff46 bellard
static void page_init(void)
239 54936004 bellard
{
240 83fb7adf bellard
    /* NOTE: we can always assume that qemu_host_page_size >=
241 54936004 bellard
       TARGET_PAGE_SIZE */
242 c2b48b69 aliguori
#ifdef _WIN32
243 c2b48b69 aliguori
    {
244 c2b48b69 aliguori
        SYSTEM_INFO system_info;
245 c2b48b69 aliguori
246 c2b48b69 aliguori
        GetSystemInfo(&system_info);
247 c2b48b69 aliguori
        qemu_real_host_page_size = system_info.dwPageSize;
248 c2b48b69 aliguori
    }
249 c2b48b69 aliguori
#else
250 c2b48b69 aliguori
    qemu_real_host_page_size = getpagesize();
251 c2b48b69 aliguori
#endif
252 83fb7adf bellard
    if (qemu_host_page_size == 0)
253 83fb7adf bellard
        qemu_host_page_size = qemu_real_host_page_size;
254 83fb7adf bellard
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
255 83fb7adf bellard
        qemu_host_page_size = TARGET_PAGE_SIZE;
256 83fb7adf bellard
    qemu_host_page_bits = 0;
257 83fb7adf bellard
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
258 83fb7adf bellard
        qemu_host_page_bits++;
259 83fb7adf bellard
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
260 108c49b8 bellard
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261 108c49b8 bellard
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
262 50a9569b balrog
263 50a9569b balrog
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
264 50a9569b balrog
    {
265 50a9569b balrog
        long long startaddr, endaddr;
266 50a9569b balrog
        FILE *f;
267 50a9569b balrog
        int n;
268 50a9569b balrog
269 c8a706fe pbrook
        mmap_lock();
270 0776590d pbrook
        last_brk = (unsigned long)sbrk(0);
271 50a9569b balrog
        f = fopen("/proc/self/maps", "r");
272 50a9569b balrog
        if (f) {
273 50a9569b balrog
            do {
274 50a9569b balrog
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
275 50a9569b balrog
                if (n == 2) {
276 e0b8d65a blueswir1
                    startaddr = MIN(startaddr,
277 e0b8d65a blueswir1
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278 e0b8d65a blueswir1
                    endaddr = MIN(endaddr,
279 e0b8d65a blueswir1
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280 b5fc909e pbrook
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
281 50a9569b balrog
                                   TARGET_PAGE_ALIGN(endaddr),
282 50a9569b balrog
                                   PAGE_RESERVED); 
283 50a9569b balrog
                }
284 50a9569b balrog
            } while (!feof(f));
285 50a9569b balrog
            fclose(f);
286 50a9569b balrog
        }
287 c8a706fe pbrook
        mmap_unlock();
288 50a9569b balrog
    }
289 50a9569b balrog
#endif
290 54936004 bellard
}
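/* For reference (not part of the original file): the fscanf pattern above
   matches /proc/self/maps lines of the form
       08048000-08056000 r-xp 00000000 03:0c 64593   /usr/bin/some-binary
   keeping only the two hexadecimal addresses and discarding the rest of the
   line, so that every existing host mapping is marked PAGE_RESERVED in the
   guest page flags. */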
291 54936004 bellard
292 434929bf aliguori
static inline PageDesc **page_l1_map(target_ulong index)
293 54936004 bellard
{
294 17e2377a pbrook
#if TARGET_LONG_BITS > 32
295 17e2377a pbrook
    /* Host memory outside guest VM.  For 32-bit targets we have already
296 17e2377a pbrook
       excluded high addresses.  */
297 d8173e0f ths
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
298 17e2377a pbrook
        return NULL;
299 17e2377a pbrook
#endif
300 434929bf aliguori
    return &l1_map[index >> L2_BITS];
301 434929bf aliguori
}
302 434929bf aliguori
303 434929bf aliguori
static inline PageDesc *page_find_alloc(target_ulong index)
304 434929bf aliguori
{
305 434929bf aliguori
    PageDesc **lp, *p;
306 434929bf aliguori
    lp = page_l1_map(index);
307 434929bf aliguori
    if (!lp)
308 434929bf aliguori
        return NULL;
309 434929bf aliguori
310 54936004 bellard
    p = *lp;
311 54936004 bellard
    if (!p) {
312 54936004 bellard
        /* allocate if not found */
313 17e2377a pbrook
#if defined(CONFIG_USER_ONLY)
314 17e2377a pbrook
        size_t len = sizeof(PageDesc) * L2_SIZE;
315 17e2377a pbrook
        /* Don't use qemu_malloc because it may recurse.  */
316 17e2377a pbrook
        p = mmap(0, len, PROT_READ | PROT_WRITE,
317 17e2377a pbrook
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
318 54936004 bellard
        *lp = p;
319 fb1c2cd7 aurel32
        if (h2g_valid(p)) {
320 fb1c2cd7 aurel32
            unsigned long addr = h2g(p);
321 17e2377a pbrook
            page_set_flags(addr & TARGET_PAGE_MASK,
322 17e2377a pbrook
                           TARGET_PAGE_ALIGN(addr + len),
323 17e2377a pbrook
                           PAGE_RESERVED); 
324 17e2377a pbrook
        }
325 17e2377a pbrook
#else
326 17e2377a pbrook
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
327 17e2377a pbrook
        *lp = p;
328 17e2377a pbrook
#endif
329 54936004 bellard
    }
330 54936004 bellard
    return p + (index & (L2_SIZE - 1));
331 54936004 bellard
}
332 54936004 bellard
333 00f82b8a aurel32
static inline PageDesc *page_find(target_ulong index)
334 54936004 bellard
{
335 434929bf aliguori
    PageDesc **lp, *p;
336 434929bf aliguori
    lp = page_l1_map(index);
337 434929bf aliguori
    if (!lp)
338 434929bf aliguori
        return NULL;
339 54936004 bellard
340 434929bf aliguori
    p = *lp;
341 54936004 bellard
    if (!p)
342 54936004 bellard
        return 0;
343 fd6ce8f6 bellard
    return p + (index & (L2_SIZE - 1));
344 fd6ce8f6 bellard
}
345 fd6ce8f6 bellard
346 108c49b8 bellard
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
347 92e873b9 bellard
{
348 108c49b8 bellard
    void **lp, **p;
349 e3f4e2a4 pbrook
    PhysPageDesc *pd;
350 92e873b9 bellard
351 108c49b8 bellard
    p = (void **)l1_phys_map;
352 108c49b8 bellard
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
353 108c49b8 bellard
354 108c49b8 bellard
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
355 108c49b8 bellard
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
356 108c49b8 bellard
#endif
357 108c49b8 bellard
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
358 92e873b9 bellard
    p = *lp;
359 92e873b9 bellard
    if (!p) {
360 92e873b9 bellard
        /* allocate if not found */
361 108c49b8 bellard
        if (!alloc)
362 108c49b8 bellard
            return NULL;
363 108c49b8 bellard
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
364 108c49b8 bellard
        memset(p, 0, sizeof(void *) * L1_SIZE);
365 108c49b8 bellard
        *lp = p;
366 108c49b8 bellard
    }
367 108c49b8 bellard
#endif
368 108c49b8 bellard
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
369 e3f4e2a4 pbrook
    pd = *lp;
370 e3f4e2a4 pbrook
    if (!pd) {
371 e3f4e2a4 pbrook
        int i;
372 108c49b8 bellard
        /* allocate if not found */
373 108c49b8 bellard
        if (!alloc)
374 108c49b8 bellard
            return NULL;
375 e3f4e2a4 pbrook
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
376 e3f4e2a4 pbrook
        *lp = pd;
377 67c4d23c pbrook
        for (i = 0; i < L2_SIZE; i++) {
378 e3f4e2a4 pbrook
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
379 67c4d23c pbrook
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
380 67c4d23c pbrook
        }
381 92e873b9 bellard
    }
382 e3f4e2a4 pbrook
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
383 92e873b9 bellard
}
384 92e873b9 bellard
385 108c49b8 bellard
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
386 92e873b9 bellard
{
387 108c49b8 bellard
    return phys_page_find_alloc(index, 0);
388 92e873b9 bellard
}
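/* A minimal usage sketch (not part of the original file, name is
   hypothetical): looking up the PhysPageDesc of a physical address and
   splitting phys_offset into the io_index kept in the low bits and the
   page-aligned part, as described by the PhysPageDesc comment above. */
#if 0
static void example_phys_lookup(target_phys_addr_t paddr)
{
    PhysPageDesc *pd = phys_page_find(paddr >> TARGET_PAGE_BITS);

    if (!pd) {
        printf("no descriptor: page is unassigned\n");
        return;
    }
    printf("io_index=%lx page=%lx region_offset=%lx\n",
           (unsigned long)(pd->phys_offset & ~TARGET_PAGE_MASK),
           (unsigned long)(pd->phys_offset & TARGET_PAGE_MASK),
           (unsigned long)pd->region_offset);
}
#endif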
389 92e873b9 bellard
390 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
391 6a00d601 bellard
static void tlb_protect_code(ram_addr_t ram_addr);
392 5fafdf24 ths
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
393 3a7d929e bellard
                                    target_ulong vaddr);
394 c8a706fe pbrook
#define mmap_lock() do { } while(0)
395 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
396 9fa3e853 bellard
#endif
397 fd6ce8f6 bellard
398 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399 4369415f bellard
400 4369415f bellard
#if defined(CONFIG_USER_ONLY)
401 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
402 4369415f bellard
   user mode. It will change when a dedicated libc is used */
403 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
404 4369415f bellard
#endif
405 4369415f bellard
406 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
407 4369415f bellard
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
408 4369415f bellard
#endif
409 4369415f bellard
410 8fcd3692 blueswir1
static void code_gen_alloc(unsigned long tb_size)
411 26a5f13b bellard
{
412 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
413 4369415f bellard
    code_gen_buffer = static_code_gen_buffer;
414 4369415f bellard
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415 4369415f bellard
    map_exec(code_gen_buffer, code_gen_buffer_size);
416 4369415f bellard
#else
417 26a5f13b bellard
    code_gen_buffer_size = tb_size;
418 26a5f13b bellard
    if (code_gen_buffer_size == 0) {
419 4369415f bellard
#if defined(CONFIG_USER_ONLY)
420 4369415f bellard
        /* in user mode, phys_ram_size is not meaningful */
421 4369415f bellard
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
422 4369415f bellard
#else
423 ccbb4d44 Stuart Brady
        /* XXX: needs adjustments */
424 94a6b54f pbrook
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
425 4369415f bellard
#endif
426 26a5f13b bellard
    }
427 26a5f13b bellard
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
428 26a5f13b bellard
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
429 26a5f13b bellard
    /* The code gen buffer location may have constraints depending on
430 26a5f13b bellard
       the host cpu and OS */
431 26a5f13b bellard
#if defined(__linux__) 
432 26a5f13b bellard
    {
433 26a5f13b bellard
        int flags;
434 141ac468 blueswir1
        void *start = NULL;
435 141ac468 blueswir1
436 26a5f13b bellard
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
437 26a5f13b bellard
#if defined(__x86_64__)
438 26a5f13b bellard
        flags |= MAP_32BIT;
439 26a5f13b bellard
        /* Cannot map more than that */
440 26a5f13b bellard
        if (code_gen_buffer_size > (800 * 1024 * 1024))
441 26a5f13b bellard
            code_gen_buffer_size = (800 * 1024 * 1024);
442 141ac468 blueswir1
#elif defined(__sparc_v9__)
443 141ac468 blueswir1
        // Map the buffer below 2G, so we can use direct calls and branches
444 141ac468 blueswir1
        flags |= MAP_FIXED;
445 141ac468 blueswir1
        start = (void *) 0x60000000UL;
446 141ac468 blueswir1
        if (code_gen_buffer_size > (512 * 1024 * 1024))
447 141ac468 blueswir1
            code_gen_buffer_size = (512 * 1024 * 1024);
448 1cb0661e balrog
#elif defined(__arm__)
449 63d41246 balrog
        /* Map the buffer below 32M, so we can use direct calls and branches */
450 1cb0661e balrog
        flags |= MAP_FIXED;
451 1cb0661e balrog
        start = (void *) 0x01000000UL;
452 1cb0661e balrog
        if (code_gen_buffer_size > 16 * 1024 * 1024)
453 1cb0661e balrog
            code_gen_buffer_size = 16 * 1024 * 1024;
454 26a5f13b bellard
#endif
455 141ac468 blueswir1
        code_gen_buffer = mmap(start, code_gen_buffer_size,
456 141ac468 blueswir1
                               PROT_WRITE | PROT_READ | PROT_EXEC,
457 26a5f13b bellard
                               flags, -1, 0);
458 26a5f13b bellard
        if (code_gen_buffer == MAP_FAILED) {
459 26a5f13b bellard
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
460 26a5f13b bellard
            exit(1);
461 26a5f13b bellard
        }
462 26a5f13b bellard
    }
463 c5e97233 blueswir1
#elif defined(__FreeBSD__) || defined(__DragonFly__)
464 06e67a82 aliguori
    {
465 06e67a82 aliguori
        int flags;
466 06e67a82 aliguori
        void *addr = NULL;
467 06e67a82 aliguori
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
468 06e67a82 aliguori
#if defined(__x86_64__)
469 06e67a82 aliguori
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
470 06e67a82 aliguori
         * 0x40000000 is free */
471 06e67a82 aliguori
        flags |= MAP_FIXED;
472 06e67a82 aliguori
        addr = (void *)0x40000000;
473 06e67a82 aliguori
        /* Cannot map more than that */
474 06e67a82 aliguori
        if (code_gen_buffer_size > (800 * 1024 * 1024))
475 06e67a82 aliguori
            code_gen_buffer_size = (800 * 1024 * 1024);
476 06e67a82 aliguori
#endif
477 06e67a82 aliguori
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
478 06e67a82 aliguori
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
479 06e67a82 aliguori
                               flags, -1, 0);
480 06e67a82 aliguori
        if (code_gen_buffer == MAP_FAILED) {
481 06e67a82 aliguori
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482 06e67a82 aliguori
            exit(1);
483 06e67a82 aliguori
        }
484 06e67a82 aliguori
    }
485 26a5f13b bellard
#else
486 26a5f13b bellard
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
487 26a5f13b bellard
    map_exec(code_gen_buffer, code_gen_buffer_size);
488 26a5f13b bellard
#endif
489 4369415f bellard
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
490 26a5f13b bellard
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
491 26a5f13b bellard
    code_gen_buffer_max_size = code_gen_buffer_size - 
492 26a5f13b bellard
        code_gen_max_block_size();
493 26a5f13b bellard
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
494 26a5f13b bellard
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
495 26a5f13b bellard
}
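/* Worked example (illustration only; the average block size is an assumption
   taken from exec-all.h): with the 32 MB DEFAULT_CODE_GEN_BUFFER_SIZE and a
   CODE_GEN_AVG_BLOCK_SIZE of 128 bytes, code_gen_max_blocks comes out to
   32 MB / 128 = 262144 TranslationBlock descriptors, while
   code_gen_buffer_max_size keeps one maximum-sized block of headroom so that
   tb_alloc() can never run past the end of the buffer. */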
496 26a5f13b bellard
497 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
498 26a5f13b bellard
   (in bytes) allocated to the translation buffer. Zero means default
499 26a5f13b bellard
   size. */
500 26a5f13b bellard
void cpu_exec_init_all(unsigned long tb_size)
501 26a5f13b bellard
{
502 26a5f13b bellard
    cpu_gen_init();
503 26a5f13b bellard
    code_gen_alloc(tb_size);
504 26a5f13b bellard
    code_gen_ptr = code_gen_buffer;
505 4369415f bellard
    page_init();
506 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
507 26a5f13b bellard
    io_mem_init();
508 e2eef170 pbrook
#endif
509 26a5f13b bellard
}
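/* A minimal usage sketch (not part of the original file; names are
   hypothetical apart from the QEMU functions): the front end calls this once
   before creating any CPU; a tb_size of 0 selects the default buffer size. */
#if 0
static void example_startup(const char *cpu_model)
{
    CPUState *env;

    cpu_exec_init_all(0);           /* 0 => default code_gen buffer sizing above */
    env = cpu_init(cpu_model);      /* per-target helper, ends up in cpu_exec_init() */
    (void)env;
}
#endif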
510 26a5f13b bellard
511 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
512 9656f324 pbrook
513 9656f324 pbrook
#define CPU_COMMON_SAVE_VERSION 1
514 9656f324 pbrook
515 9656f324 pbrook
static void cpu_common_save(QEMUFile *f, void *opaque)
516 9656f324 pbrook
{
517 9656f324 pbrook
    CPUState *env = opaque;
518 9656f324 pbrook
519 b0a46a33 Jan Kiszka
    cpu_synchronize_state(env, 0);
520 b0a46a33 Jan Kiszka
521 9656f324 pbrook
    qemu_put_be32s(f, &env->halted);
522 9656f324 pbrook
    qemu_put_be32s(f, &env->interrupt_request);
523 9656f324 pbrook
}
524 9656f324 pbrook
525 9656f324 pbrook
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
526 9656f324 pbrook
{
527 9656f324 pbrook
    CPUState *env = opaque;
528 9656f324 pbrook
529 9656f324 pbrook
    if (version_id != CPU_COMMON_SAVE_VERSION)
530 9656f324 pbrook
        return -EINVAL;
531 9656f324 pbrook
532 9656f324 pbrook
    qemu_get_be32s(f, &env->halted);
533 75f482ae pbrook
    qemu_get_be32s(f, &env->interrupt_request);
534 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
535 3098dba0 aurel32
       version_id is increased. */
536 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
537 9656f324 pbrook
    tlb_flush(env, 1);
538 b0a46a33 Jan Kiszka
    cpu_synchronize_state(env, 1);
539 9656f324 pbrook
540 9656f324 pbrook
    return 0;
541 9656f324 pbrook
}
542 9656f324 pbrook
#endif
543 9656f324 pbrook
544 6a00d601 bellard
void cpu_exec_init(CPUState *env)
545 fd6ce8f6 bellard
{
546 6a00d601 bellard
    CPUState **penv;
547 6a00d601 bellard
    int cpu_index;
548 6a00d601 bellard
549 c2764719 pbrook
#if defined(CONFIG_USER_ONLY)
550 c2764719 pbrook
    cpu_list_lock();
551 c2764719 pbrook
#endif
552 6a00d601 bellard
    env->next_cpu = NULL;
553 6a00d601 bellard
    penv = &first_cpu;
554 6a00d601 bellard
    cpu_index = 0;
555 6a00d601 bellard
    while (*penv != NULL) {
556 6a00d601 bellard
        penv = (CPUState **)&(*penv)->next_cpu;
557 6a00d601 bellard
        cpu_index++;
558 6a00d601 bellard
    }
559 6a00d601 bellard
    env->cpu_index = cpu_index;
560 268a362c aliguori
    env->numa_node = 0;
561 c0ce998e aliguori
    TAILQ_INIT(&env->breakpoints);
562 c0ce998e aliguori
    TAILQ_INIT(&env->watchpoints);
563 6a00d601 bellard
    *penv = env;
564 c2764719 pbrook
#if defined(CONFIG_USER_ONLY)
565 c2764719 pbrook
    cpu_list_unlock();
566 c2764719 pbrook
#endif
567 b3c7724c pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
568 9656f324 pbrook
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
569 9656f324 pbrook
                    cpu_common_save, cpu_common_load, env);
570 b3c7724c pbrook
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
571 b3c7724c pbrook
                    cpu_save, cpu_load, env);
572 b3c7724c pbrook
#endif
573 fd6ce8f6 bellard
}
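/* The CPUs registered above form a singly linked list rooted at first_cpu;
   a minimal sketch (not part of the original file) of walking it, the same
   pattern tb_flush() uses below: */
#if 0
static void example_for_each_cpu(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        printf("cpu %d (numa node %d)\n", env->cpu_index, env->numa_node);
}
#endif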
574 fd6ce8f6 bellard
575 9fa3e853 bellard
static inline void invalidate_page_bitmap(PageDesc *p)
576 9fa3e853 bellard
{
577 9fa3e853 bellard
    if (p->code_bitmap) {
578 59817ccb bellard
        qemu_free(p->code_bitmap);
579 9fa3e853 bellard
        p->code_bitmap = NULL;
580 9fa3e853 bellard
    }
581 9fa3e853 bellard
    p->code_write_count = 0;
582 9fa3e853 bellard
}
583 9fa3e853 bellard
584 fd6ce8f6 bellard
/* set the 'first_tb' field of every PageDesc to NULL */
585 fd6ce8f6 bellard
static void page_flush_tb(void)
586 fd6ce8f6 bellard
{
587 fd6ce8f6 bellard
    int i, j;
588 fd6ce8f6 bellard
    PageDesc *p;
589 fd6ce8f6 bellard
590 fd6ce8f6 bellard
    for(i = 0; i < L1_SIZE; i++) {
591 fd6ce8f6 bellard
        p = l1_map[i];
592 fd6ce8f6 bellard
        if (p) {
593 9fa3e853 bellard
            for(j = 0; j < L2_SIZE; j++) {
594 9fa3e853 bellard
                p->first_tb = NULL;
595 9fa3e853 bellard
                invalidate_page_bitmap(p);
596 9fa3e853 bellard
                p++;
597 9fa3e853 bellard
            }
598 fd6ce8f6 bellard
        }
599 fd6ce8f6 bellard
    }
600 fd6ce8f6 bellard
}
601 fd6ce8f6 bellard
602 fd6ce8f6 bellard
/* flush all the translation blocks */
603 d4e8164f bellard
/* XXX: tb_flush is currently not thread safe */
604 6a00d601 bellard
void tb_flush(CPUState *env1)
605 fd6ce8f6 bellard
{
606 6a00d601 bellard
    CPUState *env;
607 0124311e bellard
#if defined(DEBUG_FLUSH)
608 ab3d1727 blueswir1
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
609 ab3d1727 blueswir1
           (unsigned long)(code_gen_ptr - code_gen_buffer),
610 ab3d1727 blueswir1
           nb_tbs, nb_tbs > 0 ?
611 ab3d1727 blueswir1
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
612 fd6ce8f6 bellard
#endif
613 26a5f13b bellard
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
614 a208e54a pbrook
        cpu_abort(env1, "Internal error: code buffer overflow\n");
615 a208e54a pbrook
616 fd6ce8f6 bellard
    nb_tbs = 0;
617 3b46e624 ths
618 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
619 6a00d601 bellard
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
620 6a00d601 bellard
    }
621 9fa3e853 bellard
622 8a8a608f bellard
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
623 fd6ce8f6 bellard
    page_flush_tb();
624 9fa3e853 bellard
625 fd6ce8f6 bellard
    code_gen_ptr = code_gen_buffer;
626 d4e8164f bellard
    /* XXX: flush processor icache at this point if cache flush is
627 d4e8164f bellard
       expensive */
628 e3db7226 bellard
    tb_flush_count++;
629 fd6ce8f6 bellard
}
630 fd6ce8f6 bellard
631 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
632 fd6ce8f6 bellard
633 bc98a7ef j_mayer
static void tb_invalidate_check(target_ulong address)
634 fd6ce8f6 bellard
{
635 fd6ce8f6 bellard
    TranslationBlock *tb;
636 fd6ce8f6 bellard
    int i;
637 fd6ce8f6 bellard
    address &= TARGET_PAGE_MASK;
638 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
639 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
640 fd6ce8f6 bellard
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
641 fd6ce8f6 bellard
                  address >= tb->pc + tb->size)) {
642 fd6ce8f6 bellard
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
643 99773bd4 pbrook
                       address, (long)tb->pc, tb->size);
644 fd6ce8f6 bellard
            }
645 fd6ce8f6 bellard
        }
646 fd6ce8f6 bellard
    }
647 fd6ce8f6 bellard
}
648 fd6ce8f6 bellard
649 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
650 fd6ce8f6 bellard
static void tb_page_check(void)
651 fd6ce8f6 bellard
{
652 fd6ce8f6 bellard
    TranslationBlock *tb;
653 fd6ce8f6 bellard
    int i, flags1, flags2;
654 3b46e624 ths
655 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
656 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
657 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
658 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
659 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
660 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
661 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
662 fd6ce8f6 bellard
            }
663 fd6ce8f6 bellard
        }
664 fd6ce8f6 bellard
    }
665 fd6ce8f6 bellard
}
666 fd6ce8f6 bellard
667 bdaf78e0 blueswir1
static void tb_jmp_check(TranslationBlock *tb)
668 d4e8164f bellard
{
669 d4e8164f bellard
    TranslationBlock *tb1;
670 d4e8164f bellard
    unsigned int n1;
671 d4e8164f bellard
672 d4e8164f bellard
    /* suppress any remaining jumps to this TB */
673 d4e8164f bellard
    tb1 = tb->jmp_first;
674 d4e8164f bellard
    for(;;) {
675 d4e8164f bellard
        n1 = (long)tb1 & 3;
676 d4e8164f bellard
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
677 d4e8164f bellard
        if (n1 == 2)
678 d4e8164f bellard
            break;
679 d4e8164f bellard
        tb1 = tb1->jmp_next[n1];
680 d4e8164f bellard
    }
681 d4e8164f bellard
    /* check end of list */
682 d4e8164f bellard
    if (tb1 != tb) {
683 d4e8164f bellard
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
684 d4e8164f bellard
    }
685 d4e8164f bellard
}
686 d4e8164f bellard
687 fd6ce8f6 bellard
#endif
688 fd6ce8f6 bellard
689 fd6ce8f6 bellard
/* invalidate one TB */
690 fd6ce8f6 bellard
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
691 fd6ce8f6 bellard
                             int next_offset)
692 fd6ce8f6 bellard
{
693 fd6ce8f6 bellard
    TranslationBlock *tb1;
694 fd6ce8f6 bellard
    for(;;) {
695 fd6ce8f6 bellard
        tb1 = *ptb;
696 fd6ce8f6 bellard
        if (tb1 == tb) {
697 fd6ce8f6 bellard
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
698 fd6ce8f6 bellard
            break;
699 fd6ce8f6 bellard
        }
700 fd6ce8f6 bellard
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
701 fd6ce8f6 bellard
    }
702 fd6ce8f6 bellard
}
703 fd6ce8f6 bellard
704 9fa3e853 bellard
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
705 9fa3e853 bellard
{
706 9fa3e853 bellard
    TranslationBlock *tb1;
707 9fa3e853 bellard
    unsigned int n1;
708 9fa3e853 bellard
709 9fa3e853 bellard
    for(;;) {
710 9fa3e853 bellard
        tb1 = *ptb;
711 9fa3e853 bellard
        n1 = (long)tb1 & 3;
712 9fa3e853 bellard
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
713 9fa3e853 bellard
        if (tb1 == tb) {
714 9fa3e853 bellard
            *ptb = tb1->page_next[n1];
715 9fa3e853 bellard
            break;
716 9fa3e853 bellard
        }
717 9fa3e853 bellard
        ptb = &tb1->page_next[n1];
718 9fa3e853 bellard
    }
719 9fa3e853 bellard
}
720 9fa3e853 bellard
721 d4e8164f bellard
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
722 d4e8164f bellard
{
723 d4e8164f bellard
    TranslationBlock *tb1, **ptb;
724 d4e8164f bellard
    unsigned int n1;
725 d4e8164f bellard
726 d4e8164f bellard
    ptb = &tb->jmp_next[n];
727 d4e8164f bellard
    tb1 = *ptb;
728 d4e8164f bellard
    if (tb1) {
729 d4e8164f bellard
        /* find tb(n) in circular list */
730 d4e8164f bellard
        for(;;) {
731 d4e8164f bellard
            tb1 = *ptb;
732 d4e8164f bellard
            n1 = (long)tb1 & 3;
733 d4e8164f bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
734 d4e8164f bellard
            if (n1 == n && tb1 == tb)
735 d4e8164f bellard
                break;
736 d4e8164f bellard
            if (n1 == 2) {
737 d4e8164f bellard
                ptb = &tb1->jmp_first;
738 d4e8164f bellard
            } else {
739 d4e8164f bellard
                ptb = &tb1->jmp_next[n1];
740 d4e8164f bellard
            }
741 d4e8164f bellard
        }
742 d4e8164f bellard
        /* now we can suppress tb(n) from the list */
743 d4e8164f bellard
        *ptb = tb->jmp_next[n];
744 d4e8164f bellard
745 d4e8164f bellard
        tb->jmp_next[n] = NULL;
746 d4e8164f bellard
    }
747 d4e8164f bellard
}
748 d4e8164f bellard
749 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
750 d4e8164f bellard
   another TB */
751 d4e8164f bellard
static inline void tb_reset_jump(TranslationBlock *tb, int n)
752 d4e8164f bellard
{
753 d4e8164f bellard
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
754 d4e8164f bellard
}
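/* Note on the list encoding used above (comment added for clarity): the low
   two bits of the pointers stored in page_next[], jmp_first and jmp_next[]
   are a tag, not part of the address. Tags 0 and 1 say which of the TB's two
   pages / two jump slots the link belongs to, and tag 2 marks the head/end
   of the circular jump list (tb_phys_invalidate() below resets jmp_first to
   (long)tb | 2). That is why the traversals mask with ~3 and stop on
   n1 == 2. */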
755 d4e8164f bellard
756 2e70f6ef pbrook
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
757 fd6ce8f6 bellard
{
758 6a00d601 bellard
    CPUState *env;
759 8a40a180 bellard
    PageDesc *p;
760 d4e8164f bellard
    unsigned int h, n1;
761 00f82b8a aurel32
    target_phys_addr_t phys_pc;
762 8a40a180 bellard
    TranslationBlock *tb1, *tb2;
763 3b46e624 ths
764 8a40a180 bellard
    /* remove the TB from the hash list */
765 8a40a180 bellard
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
766 8a40a180 bellard
    h = tb_phys_hash_func(phys_pc);
767 5fafdf24 ths
    tb_remove(&tb_phys_hash[h], tb,
768 8a40a180 bellard
              offsetof(TranslationBlock, phys_hash_next));
769 8a40a180 bellard
770 8a40a180 bellard
    /* remove the TB from the page list */
771 8a40a180 bellard
    if (tb->page_addr[0] != page_addr) {
772 8a40a180 bellard
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
773 8a40a180 bellard
        tb_page_remove(&p->first_tb, tb);
774 8a40a180 bellard
        invalidate_page_bitmap(p);
775 8a40a180 bellard
    }
776 8a40a180 bellard
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
777 8a40a180 bellard
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
778 8a40a180 bellard
        tb_page_remove(&p->first_tb, tb);
779 8a40a180 bellard
        invalidate_page_bitmap(p);
780 8a40a180 bellard
    }
781 8a40a180 bellard
782 36bdbe54 bellard
    tb_invalidated_flag = 1;
783 59817ccb bellard
784 fd6ce8f6 bellard
    /* remove the TB from the per-CPU tb_jmp_cache */
785 8a40a180 bellard
    h = tb_jmp_cache_hash_func(tb->pc);
786 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
787 6a00d601 bellard
        if (env->tb_jmp_cache[h] == tb)
788 6a00d601 bellard
            env->tb_jmp_cache[h] = NULL;
789 6a00d601 bellard
    }
790 d4e8164f bellard
791 d4e8164f bellard
    /* suppress this TB from the two jump lists */
792 d4e8164f bellard
    tb_jmp_remove(tb, 0);
793 d4e8164f bellard
    tb_jmp_remove(tb, 1);
794 d4e8164f bellard
795 d4e8164f bellard
    /* suppress any remaining jumps to this TB */
796 d4e8164f bellard
    tb1 = tb->jmp_first;
797 d4e8164f bellard
    for(;;) {
798 d4e8164f bellard
        n1 = (long)tb1 & 3;
799 d4e8164f bellard
        if (n1 == 2)
800 d4e8164f bellard
            break;
801 d4e8164f bellard
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
802 d4e8164f bellard
        tb2 = tb1->jmp_next[n1];
803 d4e8164f bellard
        tb_reset_jump(tb1, n1);
804 d4e8164f bellard
        tb1->jmp_next[n1] = NULL;
805 d4e8164f bellard
        tb1 = tb2;
806 d4e8164f bellard
    }
807 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
808 9fa3e853 bellard
809 e3db7226 bellard
    tb_phys_invalidate_count++;
810 9fa3e853 bellard
}
811 9fa3e853 bellard
812 9fa3e853 bellard
static inline void set_bits(uint8_t *tab, int start, int len)
813 9fa3e853 bellard
{
814 9fa3e853 bellard
    int end, mask, end1;
815 9fa3e853 bellard
816 9fa3e853 bellard
    end = start + len;
817 9fa3e853 bellard
    tab += start >> 3;
818 9fa3e853 bellard
    mask = 0xff << (start & 7);
819 9fa3e853 bellard
    if ((start & ~7) == (end & ~7)) {
820 9fa3e853 bellard
        if (start < end) {
821 9fa3e853 bellard
            mask &= ~(0xff << (end & 7));
822 9fa3e853 bellard
            *tab |= mask;
823 9fa3e853 bellard
        }
824 9fa3e853 bellard
    } else {
825 9fa3e853 bellard
        *tab++ |= mask;
826 9fa3e853 bellard
        start = (start + 8) & ~7;
827 9fa3e853 bellard
        end1 = end & ~7;
828 9fa3e853 bellard
        while (start < end1) {
829 9fa3e853 bellard
            *tab++ = 0xff;
830 9fa3e853 bellard
            start += 8;
831 9fa3e853 bellard
        }
832 9fa3e853 bellard
        if (start < end) {
833 9fa3e853 bellard
            mask = ~(0xff << (end & 7));
834 9fa3e853 bellard
            *tab |= mask;
835 9fa3e853 bellard
        }
836 9fa3e853 bellard
    }
837 9fa3e853 bellard
}
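/* Worked example (added for illustration): set_bits(tab, 10, 4) marks bits
   10..13 by ORing 0x3c into tab[1]; a range crossing a byte boundary such as
   set_bits(tab, 6, 4) marks bits 6..9 by ORing 0xc0 into tab[0] and 0x03
   into tab[1]. */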
838 9fa3e853 bellard
839 9fa3e853 bellard
static void build_page_bitmap(PageDesc *p)
840 9fa3e853 bellard
{
841 9fa3e853 bellard
    int n, tb_start, tb_end;
842 9fa3e853 bellard
    TranslationBlock *tb;
843 3b46e624 ths
844 b2a7081a pbrook
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
845 9fa3e853 bellard
846 9fa3e853 bellard
    tb = p->first_tb;
847 9fa3e853 bellard
    while (tb != NULL) {
848 9fa3e853 bellard
        n = (long)tb & 3;
849 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
850 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
851 9fa3e853 bellard
        if (n == 0) {
852 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
853 9fa3e853 bellard
               it is not a problem */
854 9fa3e853 bellard
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
855 9fa3e853 bellard
            tb_end = tb_start + tb->size;
856 9fa3e853 bellard
            if (tb_end > TARGET_PAGE_SIZE)
857 9fa3e853 bellard
                tb_end = TARGET_PAGE_SIZE;
858 9fa3e853 bellard
        } else {
859 9fa3e853 bellard
            tb_start = 0;
860 9fa3e853 bellard
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
861 9fa3e853 bellard
        }
862 9fa3e853 bellard
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
863 9fa3e853 bellard
        tb = tb->page_next[n];
864 9fa3e853 bellard
    }
865 9fa3e853 bellard
}
866 9fa3e853 bellard
867 2e70f6ef pbrook
TranslationBlock *tb_gen_code(CPUState *env,
868 2e70f6ef pbrook
                              target_ulong pc, target_ulong cs_base,
869 2e70f6ef pbrook
                              int flags, int cflags)
870 d720b93d bellard
{
871 d720b93d bellard
    TranslationBlock *tb;
872 d720b93d bellard
    uint8_t *tc_ptr;
873 d720b93d bellard
    target_ulong phys_pc, phys_page2, virt_page2;
874 d720b93d bellard
    int code_gen_size;
875 d720b93d bellard
876 c27004ec bellard
    phys_pc = get_phys_addr_code(env, pc);
877 c27004ec bellard
    tb = tb_alloc(pc);
878 d720b93d bellard
    if (!tb) {
879 d720b93d bellard
        /* flush must be done */
880 d720b93d bellard
        tb_flush(env);
881 d720b93d bellard
        /* cannot fail at this point */
882 c27004ec bellard
        tb = tb_alloc(pc);
883 2e70f6ef pbrook
        /* Don't forget to invalidate previous TB info.  */
884 2e70f6ef pbrook
        tb_invalidated_flag = 1;
885 d720b93d bellard
    }
886 d720b93d bellard
    tc_ptr = code_gen_ptr;
887 d720b93d bellard
    tb->tc_ptr = tc_ptr;
888 d720b93d bellard
    tb->cs_base = cs_base;
889 d720b93d bellard
    tb->flags = flags;
890 d720b93d bellard
    tb->cflags = cflags;
891 d07bde88 blueswir1
    cpu_gen_code(env, tb, &code_gen_size);
892 d720b93d bellard
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
893 3b46e624 ths
894 d720b93d bellard
    /* check next page if needed */
895 c27004ec bellard
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
896 d720b93d bellard
    phys_page2 = -1;
897 c27004ec bellard
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
898 d720b93d bellard
        phys_page2 = get_phys_addr_code(env, virt_page2);
899 d720b93d bellard
    }
900 d720b93d bellard
    tb_link_phys(tb, phys_pc, phys_page2);
901 2e70f6ef pbrook
    return tb;
902 d720b93d bellard
}
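/* A minimal sketch of the call pattern (not part of the original file, the
   wrapper name is hypothetical): the TB lookup path in cpu-exec.c obtains
   the (pc, cs_base, flags) triple and asks for a block with default
   cflags. */
#if 0
static TranslationBlock *example_translate_current(CPUState *env)
{
    target_ulong pc, cs_base;
    int flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    return tb_gen_code(env, pc, cs_base, flags, 0);  /* 0: default cflags */
}
#endif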
903 3b46e624 ths
904 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical address
905 9fa3e853 bellard
   range [start;end[. NOTE: start and end must refer to
906 d720b93d bellard
   the same physical page. 'is_cpu_write_access' should be true if called
907 d720b93d bellard
   from a real cpu write access: the virtual CPU will exit the current
908 d720b93d bellard
   TB if code is modified inside this TB. */
909 00f82b8a aurel32
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
910 d720b93d bellard
                                   int is_cpu_write_access)
911 d720b93d bellard
{
912 6b917547 aliguori
    TranslationBlock *tb, *tb_next, *saved_tb;
913 d720b93d bellard
    CPUState *env = cpu_single_env;
914 9fa3e853 bellard
    target_ulong tb_start, tb_end;
915 6b917547 aliguori
    PageDesc *p;
916 6b917547 aliguori
    int n;
917 6b917547 aliguori
#ifdef TARGET_HAS_PRECISE_SMC
918 6b917547 aliguori
    int current_tb_not_found = is_cpu_write_access;
919 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
920 6b917547 aliguori
    int current_tb_modified = 0;
921 6b917547 aliguori
    target_ulong current_pc = 0;
922 6b917547 aliguori
    target_ulong current_cs_base = 0;
923 6b917547 aliguori
    int current_flags = 0;
924 6b917547 aliguori
#endif /* TARGET_HAS_PRECISE_SMC */
925 9fa3e853 bellard
926 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
927 5fafdf24 ths
    if (!p)
928 9fa3e853 bellard
        return;
929 5fafdf24 ths
    if (!p->code_bitmap &&
930 d720b93d bellard
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
931 d720b93d bellard
        is_cpu_write_access) {
932 9fa3e853 bellard
        /* build code bitmap */
933 9fa3e853 bellard
        build_page_bitmap(p);
934 9fa3e853 bellard
    }
935 9fa3e853 bellard
936 9fa3e853 bellard
    /* we remove all the TBs in the range [start, end[ */
937 9fa3e853 bellard
    /* XXX: see if in some cases it could be faster to invalidate all the code */
938 9fa3e853 bellard
    tb = p->first_tb;
939 9fa3e853 bellard
    while (tb != NULL) {
940 9fa3e853 bellard
        n = (long)tb & 3;
941 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
942 9fa3e853 bellard
        tb_next = tb->page_next[n];
943 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
944 9fa3e853 bellard
        if (n == 0) {
945 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
946 9fa3e853 bellard
               it is not a problem */
947 9fa3e853 bellard
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
948 9fa3e853 bellard
            tb_end = tb_start + tb->size;
949 9fa3e853 bellard
        } else {
950 9fa3e853 bellard
            tb_start = tb->page_addr[1];
951 9fa3e853 bellard
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
952 9fa3e853 bellard
        }
953 9fa3e853 bellard
        if (!(tb_end <= start || tb_start >= end)) {
954 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
955 d720b93d bellard
            if (current_tb_not_found) {
956 d720b93d bellard
                current_tb_not_found = 0;
957 d720b93d bellard
                current_tb = NULL;
958 2e70f6ef pbrook
                if (env->mem_io_pc) {
959 d720b93d bellard
                    /* now we have a real cpu fault */
960 2e70f6ef pbrook
                    current_tb = tb_find_pc(env->mem_io_pc);
961 d720b93d bellard
                }
962 d720b93d bellard
            }
963 d720b93d bellard
            if (current_tb == tb &&
964 2e70f6ef pbrook
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
965 d720b93d bellard
                /* If we are modifying the current TB, we must stop
966 d720b93d bellard
                its execution. We could be more precise by checking
967 d720b93d bellard
                that the modification is after the current PC, but it
968 d720b93d bellard
                would require a specialized function to partially
969 d720b93d bellard
                restore the CPU state */
970 3b46e624 ths
971 d720b93d bellard
                current_tb_modified = 1;
972 5fafdf24 ths
                cpu_restore_state(current_tb, env,
973 2e70f6ef pbrook
                                  env->mem_io_pc, NULL);
974 6b917547 aliguori
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
975 6b917547 aliguori
                                     &current_flags);
976 d720b93d bellard
            }
977 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
978 6f5a9f7e bellard
            /* we need to do that to handle the case where a signal
979 6f5a9f7e bellard
               occurs while doing tb_phys_invalidate() */
980 6f5a9f7e bellard
            saved_tb = NULL;
981 6f5a9f7e bellard
            if (env) {
982 6f5a9f7e bellard
                saved_tb = env->current_tb;
983 6f5a9f7e bellard
                env->current_tb = NULL;
984 6f5a9f7e bellard
            }
985 9fa3e853 bellard
            tb_phys_invalidate(tb, -1);
986 6f5a9f7e bellard
            if (env) {
987 6f5a9f7e bellard
                env->current_tb = saved_tb;
988 6f5a9f7e bellard
                if (env->interrupt_request && env->current_tb)
989 6f5a9f7e bellard
                    cpu_interrupt(env, env->interrupt_request);
990 6f5a9f7e bellard
            }
991 9fa3e853 bellard
        }
992 9fa3e853 bellard
        tb = tb_next;
993 9fa3e853 bellard
    }
994 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
995 9fa3e853 bellard
    /* if no code remaining, no need to continue to use slow writes */
996 9fa3e853 bellard
    if (!p->first_tb) {
997 9fa3e853 bellard
        invalidate_page_bitmap(p);
998 d720b93d bellard
        if (is_cpu_write_access) {
999 2e70f6ef pbrook
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1000 d720b93d bellard
        }
1001 d720b93d bellard
    }
1002 d720b93d bellard
#endif
1003 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1004 d720b93d bellard
    if (current_tb_modified) {
1005 d720b93d bellard
        /* we generate a block containing just the instruction
1006 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1007 d720b93d bellard
           itself */
1008 ea1c1802 bellard
        env->current_tb = NULL;
1009 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1010 d720b93d bellard
        cpu_resume_from_signal(env, NULL);
1011 9fa3e853 bellard
    }
1012 fd6ce8f6 bellard
#endif
1013 9fa3e853 bellard
}
1014 fd6ce8f6 bellard
1015 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1016 00f82b8a aurel32
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1017 9fa3e853 bellard
{
1018 9fa3e853 bellard
    PageDesc *p;
1019 9fa3e853 bellard
    int offset, b;
1020 59817ccb bellard
#if 0
1021 a4193c8a bellard
    if (1) {
1022 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1023 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1024 93fcfe39 aliguori
                  cpu_single_env->eip,
1025 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1026 59817ccb bellard
    }
1027 59817ccb bellard
#endif
1028 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1029 5fafdf24 ths
    if (!p)
1030 9fa3e853 bellard
        return;
1031 9fa3e853 bellard
    if (p->code_bitmap) {
1032 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1033 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1034 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1035 9fa3e853 bellard
            goto do_invalidate;
1036 9fa3e853 bellard
    } else {
1037 9fa3e853 bellard
    do_invalidate:
1038 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1039 9fa3e853 bellard
    }
1040 9fa3e853 bellard
}
1041 9fa3e853 bellard
1042 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1043 00f82b8a aurel32
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1044 d720b93d bellard
                                    unsigned long pc, void *puc)
1045 9fa3e853 bellard
{
1046 6b917547 aliguori
    TranslationBlock *tb;
1047 9fa3e853 bellard
    PageDesc *p;
1048 6b917547 aliguori
    int n;
1049 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1050 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1051 d720b93d bellard
    CPUState *env = cpu_single_env;
1052 6b917547 aliguori
    int current_tb_modified = 0;
1053 6b917547 aliguori
    target_ulong current_pc = 0;
1054 6b917547 aliguori
    target_ulong current_cs_base = 0;
1055 6b917547 aliguori
    int current_flags = 0;
1056 d720b93d bellard
#endif
1057 9fa3e853 bellard
1058 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1059 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1060 5fafdf24 ths
    if (!p)
1061 9fa3e853 bellard
        return;
1062 9fa3e853 bellard
    tb = p->first_tb;
1063 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1064 d720b93d bellard
    if (tb && pc != 0) {
1065 d720b93d bellard
        current_tb = tb_find_pc(pc);
1066 d720b93d bellard
    }
1067 d720b93d bellard
#endif
1068 9fa3e853 bellard
    while (tb != NULL) {
1069 9fa3e853 bellard
        n = (long)tb & 3;
1070 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1071 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1072 d720b93d bellard
        if (current_tb == tb &&
1073 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1074 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1075 d720b93d bellard
                   its execution. We could be more precise by checking
1076 d720b93d bellard
                   that the modification is after the current PC, but it
1077 d720b93d bellard
                   would require a specialized function to partially
1078 d720b93d bellard
                   restore the CPU state */
1079 3b46e624 ths
1080 d720b93d bellard
            current_tb_modified = 1;
1081 d720b93d bellard
            cpu_restore_state(current_tb, env, pc, puc);
1082 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1083 6b917547 aliguori
                                 &current_flags);
1084 d720b93d bellard
        }
1085 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1086 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1087 9fa3e853 bellard
        tb = tb->page_next[n];
1088 9fa3e853 bellard
    }
1089 fd6ce8f6 bellard
    p->first_tb = NULL;
1090 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1091 d720b93d bellard
    if (current_tb_modified) {
1092 d720b93d bellard
        /* we generate a block containing just the instruction
1093 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1094 d720b93d bellard
           itself */
1095 ea1c1802 bellard
        env->current_tb = NULL;
1096 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1097 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1098 d720b93d bellard
    }
1099 d720b93d bellard
#endif
1100 fd6ce8f6 bellard
}
1101 9fa3e853 bellard
#endif
1102 fd6ce8f6 bellard
1103 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
1104 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1105 53a5960a pbrook
                                 unsigned int n, target_ulong page_addr)
1106 fd6ce8f6 bellard
{
1107 fd6ce8f6 bellard
    PageDesc *p;
1108 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1109 9fa3e853 bellard
1110 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1111 3a7d929e bellard
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1112 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1113 9fa3e853 bellard
    last_first_tb = p->first_tb;
1114 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1115 9fa3e853 bellard
    invalidate_page_bitmap(p);
1116 fd6ce8f6 bellard
1117 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1118 d720b93d bellard
1119 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1120 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1121 53a5960a pbrook
        target_ulong addr;
1122 53a5960a pbrook
        PageDesc *p2;
1123 9fa3e853 bellard
        int prot;
1124 9fa3e853 bellard
1125 fd6ce8f6 bellard
        /* force the host page to be non-writable (writes will have a
1126 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1127 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1128 fd6ce8f6 bellard
        prot = 0;
1129 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1130 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1131 53a5960a pbrook
1132 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1133 53a5960a pbrook
            if (!p2)
1134 53a5960a pbrook
                continue;
1135 53a5960a pbrook
            prot |= p2->flags;
1136 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1137 53a5960a pbrook
            page_get_flags(addr);
1138 53a5960a pbrook
        }
1139 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1140 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1141 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1142 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1143 53a5960a pbrook
               page_addr);
1144 fd6ce8f6 bellard
#endif
1145 fd6ce8f6 bellard
    }
1146 9fa3e853 bellard
#else
1147 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1148 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1149 9fa3e853 bellard
       allocated in a physical page */
1150 9fa3e853 bellard
    if (!last_first_tb) {
1151 6a00d601 bellard
        tlb_protect_code(page_addr);
1152 9fa3e853 bellard
    }
1153 9fa3e853 bellard
#endif
1154 d720b93d bellard
1155 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1156 fd6ce8f6 bellard
}
1157 fd6ce8f6 bellard
1158 fd6ce8f6 bellard
/* Allocate a new translation block. Return NULL if there are too many
1159 fd6ce8f6 bellard
   translation blocks or too much generated code, so the caller can flush. */
1160 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1161 fd6ce8f6 bellard
{
1162 fd6ce8f6 bellard
    TranslationBlock *tb;
1163 fd6ce8f6 bellard
1164 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1165 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1166 d4e8164f bellard
        return NULL;
1167 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1168 fd6ce8f6 bellard
    tb->pc = pc;
1169 b448f2f3 bellard
    tb->cflags = 0;
1170 d4e8164f bellard
    return tb;
1171 d4e8164f bellard
}
1172 d4e8164f bellard
1173 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
1174 2e70f6ef pbrook
{
1175 bf20dc07 ths
    /* In practice this is mostly used for single-use temporary TBs.
1176 2e70f6ef pbrook
       Ignore the hard cases and just back up if this TB happens to
1177 2e70f6ef pbrook
       be the last one generated.  */
1178 2e70f6ef pbrook
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1179 2e70f6ef pbrook
        code_gen_ptr = tb->tc_ptr;
1180 2e70f6ef pbrook
        nb_tbs--;
1181 2e70f6ef pbrook
    }
1182 2e70f6ef pbrook
}
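/* Usage sketch (illustrative, not taken from a specific caller): a code
   generator can allocate a block and hand it back if generation has to be
   abandoned, which only reclaims buffer space while the block is still the
   most recently allocated one:

       TranslationBlock *tb = tb_alloc(pc);
       if (!tb) {
           tb_flush(env);              // buffer full: flush everything...
           tb = tb_alloc(pc);          // ...and retry (assumed caller policy)
       }
       // ... emit code at code_gen_ptr ...
       if (generation_failed)          // hypothetical condition
           tb_free(tb);                // no-op unless tb == &tbs[nb_tbs - 1]
*/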
1183 2e70f6ef pbrook
1184 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1185 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1186 5fafdf24 ths
void tb_link_phys(TranslationBlock *tb,
1187 9fa3e853 bellard
                  target_ulong phys_pc, target_ulong phys_page2)
1188 d4e8164f bellard
{
1189 9fa3e853 bellard
    unsigned int h;
1190 9fa3e853 bellard
    TranslationBlock **ptb;
1191 9fa3e853 bellard
1192 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1193 c8a706fe pbrook
       before we are done.  */
1194 c8a706fe pbrook
    mmap_lock();
1195 9fa3e853 bellard
    /* add in the physical hash table */
1196 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1197 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1198 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1199 9fa3e853 bellard
    *ptb = tb;
1200 fd6ce8f6 bellard
1201 fd6ce8f6 bellard
    /* add in the page list */
1202 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1203 9fa3e853 bellard
    if (phys_page2 != -1)
1204 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1205 9fa3e853 bellard
    else
1206 9fa3e853 bellard
        tb->page_addr[1] = -1;
1207 9fa3e853 bellard
1208 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1209 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1210 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1211 d4e8164f bellard
1212 d4e8164f bellard
    /* init original jump addresses */
1213 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1214 d4e8164f bellard
        tb_reset_jump(tb, 0);
1215 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1216 d4e8164f bellard
        tb_reset_jump(tb, 1);
1217 8a40a180 bellard
1218 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1219 8a40a180 bellard
    tb_page_check();
1220 8a40a180 bellard
#endif
1221 c8a706fe pbrook
    mmap_unlock();
1222 fd6ce8f6 bellard
}
1223 fd6ce8f6 bellard
1224 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1225 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1226 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1227 fd6ce8f6 bellard
{
1228 9fa3e853 bellard
    int m_min, m_max, m;
1229 9fa3e853 bellard
    unsigned long v;
1230 9fa3e853 bellard
    TranslationBlock *tb;
1231 a513fe19 bellard
1232 a513fe19 bellard
    if (nb_tbs <= 0)
1233 a513fe19 bellard
        return NULL;
1234 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1235 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1236 a513fe19 bellard
        return NULL;
1237 a513fe19 bellard
    /* binary search (cf Knuth) */
1238 a513fe19 bellard
    m_min = 0;
1239 a513fe19 bellard
    m_max = nb_tbs - 1;
1240 a513fe19 bellard
    while (m_min <= m_max) {
1241 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1242 a513fe19 bellard
        tb = &tbs[m];
1243 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1244 a513fe19 bellard
        if (v == tc_ptr)
1245 a513fe19 bellard
            return tb;
1246 a513fe19 bellard
        else if (tc_ptr < v) {
1247 a513fe19 bellard
            m_max = m - 1;
1248 a513fe19 bellard
        } else {
1249 a513fe19 bellard
            m_min = m + 1;
1250 a513fe19 bellard
        }
1251 5fafdf24 ths
    }
1252 a513fe19 bellard
    return &tbs[m_max];
1253 a513fe19 bellard
}
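/* Worked example (sketch): if three TBs were generated back to back with
   tc_ptr values 0x1000, 0x1400 and 0x1900, then tb_find_pc(0x1450) returns
   the TB at 0x1400, i.e. the block whose generated code contains that host
   address.  The addresses are made up; only the ordering matters, and it is
   guaranteed because tbs[] is filled in code_gen_buffer order.  */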
1254 7501267e bellard
1255 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1256 ea041c0e bellard
1257 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1258 ea041c0e bellard
{
1259 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1260 ea041c0e bellard
    unsigned int n1;
1261 ea041c0e bellard
1262 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1263 ea041c0e bellard
    if (tb1 != NULL) {
1264 ea041c0e bellard
        /* find head of list */
1265 ea041c0e bellard
        for(;;) {
1266 ea041c0e bellard
            n1 = (long)tb1 & 3;
1267 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1268 ea041c0e bellard
            if (n1 == 2)
1269 ea041c0e bellard
                break;
1270 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1271 ea041c0e bellard
        }
1272 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1273 ea041c0e bellard
        tb_next = tb1;
1274 ea041c0e bellard
1275 ea041c0e bellard
        /* remove tb from the jmp_first list */
1276 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1277 ea041c0e bellard
        for(;;) {
1278 ea041c0e bellard
            tb1 = *ptb;
1279 ea041c0e bellard
            n1 = (long)tb1 & 3;
1280 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1281 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1282 ea041c0e bellard
                break;
1283 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1284 ea041c0e bellard
        }
1285 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1286 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1287 3b46e624 ths
1288 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1289 ea041c0e bellard
        tb_reset_jump(tb, n);
1290 ea041c0e bellard
1291 0124311e bellard
        /* suppress jumps in the tb we could have jumped to */
1292 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1293 ea041c0e bellard
    }
1294 ea041c0e bellard
}
1295 ea041c0e bellard
1296 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1297 ea041c0e bellard
{
1298 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1299 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1300 ea041c0e bellard
}
1301 ea041c0e bellard
1302 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1303 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1304 d720b93d bellard
{
1305 9b3c35e0 j_mayer
    target_phys_addr_t addr;
1306 9b3c35e0 j_mayer
    target_ulong pd;
1307 c2f07f81 pbrook
    ram_addr_t ram_addr;
1308 c2f07f81 pbrook
    PhysPageDesc *p;
1309 d720b93d bellard
1310 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1311 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1312 c2f07f81 pbrook
    if (!p) {
1313 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1314 c2f07f81 pbrook
    } else {
1315 c2f07f81 pbrook
        pd = p->phys_offset;
1316 c2f07f81 pbrook
    }
1317 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1318 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1319 d720b93d bellard
}
1320 c27004ec bellard
#endif
1321 d720b93d bellard
1322 6658ffb8 pbrook
/* Add a watchpoint.  */
1323 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1324 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1325 6658ffb8 pbrook
{
1326 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1327 c0ce998e aliguori
    CPUWatchpoint *wp;
1328 6658ffb8 pbrook
1329 b4051334 aliguori
    /* sanity checks: allow lengths of 1, 2, 4 or 8 bytes, deny unaligned watchpoints */
1330 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1331 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1332 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1333 b4051334 aliguori
        return -EINVAL;
1334 b4051334 aliguori
    }
1335 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1336 a1d1bb31 aliguori
1337 a1d1bb31 aliguori
    wp->vaddr = addr;
1338 b4051334 aliguori
    wp->len_mask = len_mask;
1339 a1d1bb31 aliguori
    wp->flags = flags;
1340 a1d1bb31 aliguori
1341 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1342 c0ce998e aliguori
    if (flags & BP_GDB)
1343 c0ce998e aliguori
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1344 c0ce998e aliguori
    else
1345 c0ce998e aliguori
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1346 6658ffb8 pbrook
1347 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1348 a1d1bb31 aliguori
1349 a1d1bb31 aliguori
    if (watchpoint)
1350 a1d1bb31 aliguori
        *watchpoint = wp;
1351 a1d1bb31 aliguori
    return 0;
1352 6658ffb8 pbrook
}
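/* Example (illustrative sketch): a debugger front end could watch a 4-byte,
   4-byte-aligned guest location for writes with something like

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, guest_addr, 4,
                                 BP_GDB | BP_MEM_WRITE, &wp) < 0) {
           // unsupported length or unaligned address
       }

   guest_addr is a hypothetical target_ulong, and BP_MEM_WRITE is assumed to
   be one of the watchpoint flag bits defined alongside BP_GDB in the CPU
   headers; BP_GDB itself only controls the insertion order seen above.  */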
1353 6658ffb8 pbrook
1354 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1355 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1356 a1d1bb31 aliguori
                          int flags)
1357 6658ffb8 pbrook
{
1358 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1359 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1360 6658ffb8 pbrook
1361 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1362 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1363 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1364 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1365 6658ffb8 pbrook
            return 0;
1366 6658ffb8 pbrook
        }
1367 6658ffb8 pbrook
    }
1368 a1d1bb31 aliguori
    return -ENOENT;
1369 6658ffb8 pbrook
}
1370 6658ffb8 pbrook
1371 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1372 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1373 a1d1bb31 aliguori
{
1374 c0ce998e aliguori
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1375 7d03f82f edgar_igl
1376 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1377 a1d1bb31 aliguori
1378 a1d1bb31 aliguori
    qemu_free(watchpoint);
1379 a1d1bb31 aliguori
}
1380 a1d1bb31 aliguori
1381 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1382 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1383 a1d1bb31 aliguori
{
1384 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1385 a1d1bb31 aliguori
1386 c0ce998e aliguori
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1387 a1d1bb31 aliguori
        if (wp->flags & mask)
1388 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1389 c0ce998e aliguori
    }
1390 7d03f82f edgar_igl
}
1391 7d03f82f edgar_igl
1392 a1d1bb31 aliguori
/* Add a breakpoint.  */
1393 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1394 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1395 4c3a88a2 bellard
{
1396 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1397 c0ce998e aliguori
    CPUBreakpoint *bp;
1398 3b46e624 ths
1399 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1400 4c3a88a2 bellard
1401 a1d1bb31 aliguori
    bp->pc = pc;
1402 a1d1bb31 aliguori
    bp->flags = flags;
1403 a1d1bb31 aliguori
1404 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1405 c0ce998e aliguori
    if (flags & BP_GDB)
1406 c0ce998e aliguori
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1407 c0ce998e aliguori
    else
1408 c0ce998e aliguori
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1409 3b46e624 ths
1410 d720b93d bellard
    breakpoint_invalidate(env, pc);
1411 a1d1bb31 aliguori
1412 a1d1bb31 aliguori
    if (breakpoint)
1413 a1d1bb31 aliguori
        *breakpoint = bp;
1414 4c3a88a2 bellard
    return 0;
1415 4c3a88a2 bellard
#else
1416 a1d1bb31 aliguori
    return -ENOSYS;
1417 4c3a88a2 bellard
#endif
1418 4c3a88a2 bellard
}
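/* Example (illustrative sketch): pairing insertion with removal by
   reference, as a GDB stub or debug helper might do:

       CPUBreakpoint *bp;
       if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
           // ... run until the breakpoint is no longer needed ...
           cpu_breakpoint_remove_by_ref(env, bp);
       }

   On targets without TARGET_HAS_ICE the insert returns -ENOSYS and bp is
   left unset, so the caller must check the return value first.  */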
1419 4c3a88a2 bellard
1420 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1421 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1422 a1d1bb31 aliguori
{
1423 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1424 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1425 a1d1bb31 aliguori
1426 c0ce998e aliguori
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1427 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1428 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1429 a1d1bb31 aliguori
            return 0;
1430 a1d1bb31 aliguori
        }
1431 7d03f82f edgar_igl
    }
1432 a1d1bb31 aliguori
    return -ENOENT;
1433 a1d1bb31 aliguori
#else
1434 a1d1bb31 aliguori
    return -ENOSYS;
1435 7d03f82f edgar_igl
#endif
1436 7d03f82f edgar_igl
}
1437 7d03f82f edgar_igl
1438 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1439 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1440 4c3a88a2 bellard
{
1441 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1442 c0ce998e aliguori
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1443 d720b93d bellard
1444 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1445 a1d1bb31 aliguori
1446 a1d1bb31 aliguori
    qemu_free(breakpoint);
1447 a1d1bb31 aliguori
#endif
1448 a1d1bb31 aliguori
}
1449 a1d1bb31 aliguori
1450 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1451 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1452 a1d1bb31 aliguori
{
1453 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1454 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1455 a1d1bb31 aliguori
1456 c0ce998e aliguori
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1457 a1d1bb31 aliguori
        if (bp->flags & mask)
1458 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1459 c0ce998e aliguori
    }
1460 4c3a88a2 bellard
#endif
1461 4c3a88a2 bellard
}
1462 4c3a88a2 bellard
1463 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1464 c33a346e bellard
   CPU loop after each instruction */
1465 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1466 c33a346e bellard
{
1467 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1468 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1469 c33a346e bellard
        env->singlestep_enabled = enabled;
1470 e22a25c9 aliguori
        if (kvm_enabled())
1471 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1472 e22a25c9 aliguori
        else {
1473 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1474 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1475 e22a25c9 aliguori
            tb_flush(env);
1476 e22a25c9 aliguori
        }
1477 c33a346e bellard
    }
1478 c33a346e bellard
#endif
1479 c33a346e bellard
}
1480 c33a346e bellard
1481 34865134 bellard
/* enable or disable low-level logging */
1482 34865134 bellard
void cpu_set_log(int log_flags)
1483 34865134 bellard
{
1484 34865134 bellard
    loglevel = log_flags;
1485 34865134 bellard
    if (loglevel && !logfile) {
1486 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1487 34865134 bellard
        if (!logfile) {
1488 34865134 bellard
            perror(logfilename);
1489 34865134 bellard
            _exit(1);
1490 34865134 bellard
        }
1491 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1492 9fa3e853 bellard
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1493 9fa3e853 bellard
        {
1494 b55266b5 blueswir1
            static char logfile_buf[4096];
1495 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1496 9fa3e853 bellard
        }
1497 9fa3e853 bellard
#else
1498 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1499 9fa3e853 bellard
#endif
1500 e735b91c pbrook
        log_append = 1;
1501 e735b91c pbrook
    }
1502 e735b91c pbrook
    if (!loglevel && logfile) {
1503 e735b91c pbrook
        fclose(logfile);
1504 e735b91c pbrook
        logfile = NULL;
1505 34865134 bellard
    }
1506 34865134 bellard
}
1507 34865134 bellard
1508 34865134 bellard
void cpu_set_log_filename(const char *filename)
1509 34865134 bellard
{
1510 34865134 bellard
    logfilename = strdup(filename);
1511 e735b91c pbrook
    if (logfile) {
1512 e735b91c pbrook
        fclose(logfile);
1513 e735b91c pbrook
        logfile = NULL;
1514 e735b91c pbrook
    }
1515 e735b91c pbrook
    cpu_set_log(loglevel);
1516 34865134 bellard
}
1517 c33a346e bellard
1518 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1519 ea041c0e bellard
{
1520 3098dba0 aurel32
#if defined(USE_NPTL)
1521 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1522 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1523 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds.  Often
1524 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
1525 3098dba0 aurel32
#else
1526 ea041c0e bellard
    TranslationBlock *tb;
1527 15a51156 aurel32
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1528 59817ccb bellard
1529 3098dba0 aurel32
    tb = env->current_tb;
1530 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1531 3098dba0 aurel32
       all the potentially executing TB */
1532 3098dba0 aurel32
    if (tb && !testandset(&interrupt_lock)) {
1533 3098dba0 aurel32
        env->current_tb = NULL;
1534 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1535 3098dba0 aurel32
        resetlock(&interrupt_lock);
1536 be214e6c aurel32
    }
1537 3098dba0 aurel32
#endif
1538 3098dba0 aurel32
}
1539 3098dba0 aurel32
1540 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1541 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1542 3098dba0 aurel32
{
1543 3098dba0 aurel32
    int old_mask;
1544 be214e6c aurel32
1545 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1546 68a79315 bellard
    env->interrupt_request |= mask;
1547 3098dba0 aurel32
1548 8edac960 aliguori
#ifndef CONFIG_USER_ONLY
1549 8edac960 aliguori
    /*
1550 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1551 8edac960 aliguori
     * case it's halted.
1552 8edac960 aliguori
     */
1553 8edac960 aliguori
    if (!qemu_cpu_self(env)) {
1554 8edac960 aliguori
        qemu_cpu_kick(env);
1555 8edac960 aliguori
        return;
1556 8edac960 aliguori
    }
1557 8edac960 aliguori
#endif
1558 8edac960 aliguori
1559 2e70f6ef pbrook
    if (use_icount) {
1560 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1561 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1562 2e70f6ef pbrook
        if (!can_do_io(env)
1563 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1564 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1565 2e70f6ef pbrook
        }
1566 2e70f6ef pbrook
#endif
1567 2e70f6ef pbrook
    } else {
1568 3098dba0 aurel32
        cpu_unlink_tb(env);
1569 ea041c0e bellard
    }
1570 ea041c0e bellard
}
1571 ea041c0e bellard
1572 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1573 b54ad049 bellard
{
1574 b54ad049 bellard
    env->interrupt_request &= ~mask;
1575 b54ad049 bellard
}
1576 b54ad049 bellard
1577 3098dba0 aurel32
void cpu_exit(CPUState *env)
1578 3098dba0 aurel32
{
1579 3098dba0 aurel32
    env->exit_request = 1;
1580 3098dba0 aurel32
    cpu_unlink_tb(env);
1581 3098dba0 aurel32
}
1582 3098dba0 aurel32
1583 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1584 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1585 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1586 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1587 f193c797 bellard
      "show target assembly code for each compiled TB" },
1588 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1589 57fec1fe bellard
      "show micro ops for each compiled TB" },
1590 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1591 e01a1157 blueswir1
      "show micro ops "
1592 e01a1157 blueswir1
#ifdef TARGET_I386
1593 e01a1157 blueswir1
      "before eflags optimization and "
1594 f193c797 bellard
#endif
1595 e01a1157 blueswir1
      "after liveness analysis" },
1596 f193c797 bellard
    { CPU_LOG_INT, "int",
1597 f193c797 bellard
      "show interrupts/exceptions in short format" },
1598 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1599 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1600 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1601 e91c8a77 ths
      "show CPU state before block translation" },
1602 f193c797 bellard
#ifdef TARGET_I386
1603 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1604 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1605 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1606 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1607 f193c797 bellard
#endif
1608 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1609 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1610 fd872598 bellard
      "show all i/o ports accesses" },
1611 8e3a9fd2 bellard
#endif
1612 f193c797 bellard
    { 0, NULL, NULL },
1613 f193c797 bellard
};
1614 f193c797 bellard
1615 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1616 f193c797 bellard
{
1617 f193c797 bellard
    if (strlen(s2) != n)
1618 f193c797 bellard
        return 0;
1619 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1620 f193c797 bellard
}
1621 3b46e624 ths
1622 f193c797 bellard
/* takes a comma-separated list of log masks. Returns 0 on error. */
1623 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1624 f193c797 bellard
{
1625 c7cd6a37 blueswir1
    const CPULogItem *item;
1626 f193c797 bellard
    int mask;
1627 f193c797 bellard
    const char *p, *p1;
1628 f193c797 bellard
1629 f193c797 bellard
    p = str;
1630 f193c797 bellard
    mask = 0;
1631 f193c797 bellard
    for(;;) {
1632 f193c797 bellard
        p1 = strchr(p, ',');
1633 f193c797 bellard
        if (!p1)
1634 f193c797 bellard
            p1 = p + strlen(p);
1635 8e3a9fd2 bellard
        if (cmp1(p, p1 - p, "all")) {
1636 8e3a9fd2 bellard
            for(item = cpu_log_items; item->mask != 0; item++) {
1637 8e3a9fd2 bellard
                mask |= item->mask;
1638 8e3a9fd2 bellard
            }
1639 8e3a9fd2 bellard
        } else {
1640 f193c797 bellard
            for(item = cpu_log_items; item->mask != 0; item++) {
1641 f193c797 bellard
                if (cmp1(p, p1 - p, item->name))
1642 f193c797 bellard
                    goto found;
1643 f193c797 bellard
            }
1644 f193c797 bellard
            return 0;
1645 8e3a9fd2 bellard
        }
1646 f193c797 bellard
    found:
1647 f193c797 bellard
        mask |= item->mask;
1648 f193c797 bellard
        if (*p1 != ',')
1649 f193c797 bellard
            break;
1650 f193c797 bellard
        p = p1 + 1;
1651 f193c797 bellard
    }
1652 f193c797 bellard
    return mask;
1653 f193c797 bellard
}
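/* Example (sketch): parsing a "-d" style option string and enabling the
   corresponding logging:

       int mask = cpu_str_to_log_mask("in_asm,exec");
       if (!mask) {
           // unknown item name: the caller typically lists cpu_log_items[]
       } else {
           cpu_set_log(mask);   // mask == CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC
       }

   The special name "all" ORs together every mask in cpu_log_items[].  */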
1654 ea041c0e bellard
1655 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1656 7501267e bellard
{
1657 7501267e bellard
    va_list ap;
1658 493ae1f0 pbrook
    va_list ap2;
1659 7501267e bellard
1660 7501267e bellard
    va_start(ap, fmt);
1661 493ae1f0 pbrook
    va_copy(ap2, ap);
1662 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1663 7501267e bellard
    vfprintf(stderr, fmt, ap);
1664 7501267e bellard
    fprintf(stderr, "\n");
1665 7501267e bellard
#ifdef TARGET_I386
1666 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1667 7fe48483 bellard
#else
1668 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1669 7501267e bellard
#endif
1670 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1671 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1672 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1673 93fcfe39 aliguori
        qemu_log("\n");
1674 f9373291 j_mayer
#ifdef TARGET_I386
1675 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1676 f9373291 j_mayer
#else
1677 93fcfe39 aliguori
        log_cpu_state(env, 0);
1678 f9373291 j_mayer
#endif
1679 31b1a7b4 aliguori
        qemu_log_flush();
1680 93fcfe39 aliguori
        qemu_log_close();
1681 924edcae balrog
    }
1682 493ae1f0 pbrook
    va_end(ap2);
1683 f9373291 j_mayer
    va_end(ap);
1684 7501267e bellard
    abort();
1685 7501267e bellard
}
1686 7501267e bellard
1687 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1688 c5be9f08 ths
{
1689 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1690 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1691 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1692 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1693 5a38f081 aliguori
    CPUBreakpoint *bp;
1694 5a38f081 aliguori
    CPUWatchpoint *wp;
1695 5a38f081 aliguori
#endif
1696 5a38f081 aliguori
1697 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1698 5a38f081 aliguori
1699 5a38f081 aliguori
    /* Preserve chaining and index. */
1700 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1701 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1702 5a38f081 aliguori
1703 5a38f081 aliguori
    /* Clone all break/watchpoints.
1704 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1705 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1706 5a38f081 aliguori
    TAILQ_INIT(&new_env->breakpoints);
1707 5a38f081 aliguori
    TAILQ_INIT(&new_env->watchpoints);
1708 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1709 5a38f081 aliguori
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1710 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1711 5a38f081 aliguori
    }
1712 5a38f081 aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1713 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1714 5a38f081 aliguori
                              wp->flags, NULL);
1715 5a38f081 aliguori
    }
1716 5a38f081 aliguori
#endif
1717 5a38f081 aliguori
1718 c5be9f08 ths
    return new_env;
1719 c5be9f08 ths
}
1720 c5be9f08 ths
1721 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1722 0124311e bellard
1723 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1724 5c751e99 edgar_igl
{
1725 5c751e99 edgar_igl
    unsigned int i;
1726 5c751e99 edgar_igl
1727 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1728 5c751e99 edgar_igl
       overlap the flushed page.  */
1729 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1730 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1731 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1732 5c751e99 edgar_igl
1733 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1734 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1735 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1736 5c751e99 edgar_igl
}
1737 5c751e99 edgar_igl
1738 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1739 ee8b7021 bellard
   implemented yet) */
1740 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1741 33417e70 bellard
{
1742 33417e70 bellard
    int i;
1743 0124311e bellard
1744 9fa3e853 bellard
#if defined(DEBUG_TLB)
1745 9fa3e853 bellard
    printf("tlb_flush:\n");
1746 9fa3e853 bellard
#endif
1747 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1748 0124311e bellard
       links while we are modifying them */
1749 0124311e bellard
    env->current_tb = NULL;
1750 0124311e bellard
1751 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1752 84b7b8e7 bellard
        env->tlb_table[0][i].addr_read = -1;
1753 84b7b8e7 bellard
        env->tlb_table[0][i].addr_write = -1;
1754 84b7b8e7 bellard
        env->tlb_table[0][i].addr_code = -1;
1755 84b7b8e7 bellard
        env->tlb_table[1][i].addr_read = -1;
1756 84b7b8e7 bellard
        env->tlb_table[1][i].addr_write = -1;
1757 84b7b8e7 bellard
        env->tlb_table[1][i].addr_code = -1;
1758 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1759 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_read = -1;
1760 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_write = -1;
1761 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_code = -1;
1762 e37e6ee6 aurel32
#endif
1763 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1764 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_read = -1;
1765 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_write = -1;
1766 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_code = -1;
1767 6fa4cea9 j_mayer
#endif
1768 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1769 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_read = -1;
1770 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_write = -1;
1771 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_code = -1;
1772 6fa4cea9 j_mayer
#endif
1773 e37e6ee6 aurel32
1774 33417e70 bellard
    }
1775 9fa3e853 bellard
1776 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1777 9fa3e853 bellard
1778 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
1779 0a962c02 bellard
    if (env->kqemu_enabled) {
1780 0a962c02 bellard
        kqemu_flush(env, flush_global);
1781 0a962c02 bellard
    }
1782 0a962c02 bellard
#endif
1783 e3db7226 bellard
    tlb_flush_count++;
1784 33417e70 bellard
}
1785 33417e70 bellard
1786 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1787 61382a50 bellard
{
1788 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1789 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1790 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1791 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1792 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1793 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1794 84b7b8e7 bellard
        tlb_entry->addr_read = -1;
1795 84b7b8e7 bellard
        tlb_entry->addr_write = -1;
1796 84b7b8e7 bellard
        tlb_entry->addr_code = -1;
1797 84b7b8e7 bellard
    }
1798 61382a50 bellard
}
1799 61382a50 bellard
1800 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
1801 33417e70 bellard
{
1802 8a40a180 bellard
    int i;
1803 0124311e bellard
1804 9fa3e853 bellard
#if defined(DEBUG_TLB)
1805 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1806 9fa3e853 bellard
#endif
1807 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1808 0124311e bellard
       links while we are modifying them */
1809 0124311e bellard
    env->current_tb = NULL;
1810 61382a50 bellard
1811 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
1812 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1813 84b7b8e7 bellard
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1814 84b7b8e7 bellard
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1815 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1816 6fa4cea9 j_mayer
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1817 e37e6ee6 aurel32
#endif
1818 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1819 6fa4cea9 j_mayer
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1820 6fa4cea9 j_mayer
#endif
1821 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1822 e37e6ee6 aurel32
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1823 6fa4cea9 j_mayer
#endif
1824 0124311e bellard
1825 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
1826 9fa3e853 bellard
1827 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
1828 0a962c02 bellard
    if (env->kqemu_enabled) {
1829 0a962c02 bellard
        kqemu_flush_page(env, addr);
1830 0a962c02 bellard
    }
1831 0a962c02 bellard
#endif
1832 9fa3e853 bellard
}
1833 9fa3e853 bellard
1834 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1835 9fa3e853 bellard
   can be detected */
1836 6a00d601 bellard
static void tlb_protect_code(ram_addr_t ram_addr)
1837 9fa3e853 bellard
{
1838 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1839 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1840 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1841 9fa3e853 bellard
}
1842 9fa3e853 bellard
1843 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1844 3a7d929e bellard
   tested for self modifying code */
1845 5fafdf24 ths
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1846 3a7d929e bellard
                                    target_ulong vaddr)
1847 9fa3e853 bellard
{
1848 3a7d929e bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1849 1ccde1cb bellard
}
1850 1ccde1cb bellard
1851 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1852 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
1853 1ccde1cb bellard
{
1854 1ccde1cb bellard
    unsigned long addr;
1855 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1856 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1857 1ccde1cb bellard
        if ((addr - start) < length) {
1858 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1859 1ccde1cb bellard
        }
1860 1ccde1cb bellard
    }
1861 1ccde1cb bellard
}
1862 1ccde1cb bellard
1863 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
1864 3a7d929e bellard
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1865 0a962c02 bellard
                                     int dirty_flags)
1866 1ccde1cb bellard
{
1867 1ccde1cb bellard
    CPUState *env;
1868 4f2ac237 bellard
    unsigned long length, start1;
1869 0a962c02 bellard
    int i, mask, len;
1870 0a962c02 bellard
    uint8_t *p;
1871 1ccde1cb bellard
1872 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
1873 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
1874 1ccde1cb bellard
1875 1ccde1cb bellard
    length = end - start;
1876 1ccde1cb bellard
    if (length == 0)
1877 1ccde1cb bellard
        return;
1878 0a962c02 bellard
    len = length >> TARGET_PAGE_BITS;
1879 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
1880 6a00d601 bellard
    /* XXX: should not depend on cpu context */
1881 6a00d601 bellard
    env = first_cpu;
1882 3a7d929e bellard
    if (env->kqemu_enabled) {
1883 f23db169 bellard
        ram_addr_t addr;
1884 f23db169 bellard
        addr = start;
1885 f23db169 bellard
        for(i = 0; i < len; i++) {
1886 f23db169 bellard
            kqemu_set_notdirty(env, addr);
1887 f23db169 bellard
            addr += TARGET_PAGE_SIZE;
1888 f23db169 bellard
        }
1889 3a7d929e bellard
    }
1890 3a7d929e bellard
#endif
1891 f23db169 bellard
    mask = ~dirty_flags;
1892 f23db169 bellard
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1893 f23db169 bellard
    for(i = 0; i < len; i++)
1894 f23db169 bellard
        p[i] &= mask;
1895 f23db169 bellard
1896 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
1897 1ccde1cb bellard
       when accessing the range */
1898 5579c7f3 pbrook
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1899 5579c7f3 pbrook
    /* Check that we don't span multiple blocks - this breaks the
1900 5579c7f3 pbrook
       address comparisons below.  */
1901 5579c7f3 pbrook
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1902 5579c7f3 pbrook
            != (end - 1) - start) {
1903 5579c7f3 pbrook
        abort();
1904 5579c7f3 pbrook
    }
1905 5579c7f3 pbrook
1906 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1907 6a00d601 bellard
        for(i = 0; i < CPU_TLB_SIZE; i++)
1908 84b7b8e7 bellard
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1909 6a00d601 bellard
        for(i = 0; i < CPU_TLB_SIZE; i++)
1910 84b7b8e7 bellard
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1911 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1912 6fa4cea9 j_mayer
        for(i = 0; i < CPU_TLB_SIZE; i++)
1913 6fa4cea9 j_mayer
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1914 e37e6ee6 aurel32
#endif
1915 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1916 6fa4cea9 j_mayer
        for(i = 0; i < CPU_TLB_SIZE; i++)
1917 6fa4cea9 j_mayer
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1918 6fa4cea9 j_mayer
#endif
1919 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1920 e37e6ee6 aurel32
        for(i = 0; i < CPU_TLB_SIZE; i++)
1921 e37e6ee6 aurel32
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1922 6fa4cea9 j_mayer
#endif
1923 6a00d601 bellard
    }
1924 1ccde1cb bellard
}
1925 1ccde1cb bellard
1926 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
1927 74576198 aliguori
{
1928 74576198 aliguori
    in_migration = enable;
1929 b0a46a33 Jan Kiszka
    if (kvm_enabled()) {
1930 b0a46a33 Jan Kiszka
        return kvm_set_migration_log(enable);
1931 b0a46a33 Jan Kiszka
    }
1932 74576198 aliguori
    return 0;
1933 74576198 aliguori
}
1934 74576198 aliguori
1935 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
1936 74576198 aliguori
{
1937 74576198 aliguori
    return in_migration;
1938 74576198 aliguori
}
1939 74576198 aliguori
1940 151f7749 Jan Kiszka
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1941 151f7749 Jan Kiszka
                                   target_phys_addr_t end_addr)
1942 2bec46dc aliguori
{
1943 151f7749 Jan Kiszka
    int ret = 0;
1944 151f7749 Jan Kiszka
1945 2bec46dc aliguori
    if (kvm_enabled())
1946 151f7749 Jan Kiszka
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1947 151f7749 Jan Kiszka
    return ret;
1948 2bec46dc aliguori
}
1949 2bec46dc aliguori
1950 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1951 3a7d929e bellard
{
1952 3a7d929e bellard
    ram_addr_t ram_addr;
1953 5579c7f3 pbrook
    void *p;
1954 3a7d929e bellard
1955 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1956 5579c7f3 pbrook
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1957 5579c7f3 pbrook
            + tlb_entry->addend);
1958 5579c7f3 pbrook
        ram_addr = qemu_ram_addr_from_host(p);
1959 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1960 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1961 3a7d929e bellard
        }
1962 3a7d929e bellard
    }
1963 3a7d929e bellard
}
1964 3a7d929e bellard
1965 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
1966 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
1967 3a7d929e bellard
{
1968 3a7d929e bellard
    int i;
1969 3a7d929e bellard
    for(i = 0; i < CPU_TLB_SIZE; i++)
1970 84b7b8e7 bellard
        tlb_update_dirty(&env->tlb_table[0][i]);
1971 3a7d929e bellard
    for(i = 0; i < CPU_TLB_SIZE; i++)
1972 84b7b8e7 bellard
        tlb_update_dirty(&env->tlb_table[1][i]);
1973 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1974 6fa4cea9 j_mayer
    for(i = 0; i < CPU_TLB_SIZE; i++)
1975 6fa4cea9 j_mayer
        tlb_update_dirty(&env->tlb_table[2][i]);
1976 e37e6ee6 aurel32
#endif
1977 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1978 6fa4cea9 j_mayer
    for(i = 0; i < CPU_TLB_SIZE; i++)
1979 6fa4cea9 j_mayer
        tlb_update_dirty(&env->tlb_table[3][i]);
1980 6fa4cea9 j_mayer
#endif
1981 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1982 e37e6ee6 aurel32
    for(i = 0; i < CPU_TLB_SIZE; i++)
1983 e37e6ee6 aurel32
        tlb_update_dirty(&env->tlb_table[4][i]);
1984 6fa4cea9 j_mayer
#endif
1985 3a7d929e bellard
}
1986 3a7d929e bellard
1987 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1988 1ccde1cb bellard
{
1989 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1990 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
1991 1ccde1cb bellard
}
1992 1ccde1cb bellard
1993 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
1994 0f459d16 pbrook
   so that it is no longer dirty */
1995 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1996 1ccde1cb bellard
{
1997 1ccde1cb bellard
    int i;
1998 1ccde1cb bellard
1999 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2000 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2001 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
2002 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
2003 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
2004 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2005 e37e6ee6 aurel32
#endif
2006 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
2007 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2008 6fa4cea9 j_mayer
#endif
2009 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
2010 e37e6ee6 aurel32
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
2011 6fa4cea9 j_mayer
#endif
2012 9fa3e853 bellard
}
2013 9fa3e853 bellard
2014 59817ccb bellard
/* add a new TLB entry. At most one entry for a given virtual address
2015 59817ccb bellard
   is permitted. Return 0 if OK or 2 if the page could not be mapped
2016 59817ccb bellard
   (can only happen in non SOFTMMU mode for I/O pages or pages
2017 59817ccb bellard
   conflicting with the host address space). */
2018 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2019 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
2020 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2021 9fa3e853 bellard
{
2022 92e873b9 bellard
    PhysPageDesc *p;
2023 4f2ac237 bellard
    unsigned long pd;
2024 9fa3e853 bellard
    unsigned int index;
2025 4f2ac237 bellard
    target_ulong address;
2026 0f459d16 pbrook
    target_ulong code_address;
2027 108c49b8 bellard
    target_phys_addr_t addend;
2028 9fa3e853 bellard
    int ret;
2029 84b7b8e7 bellard
    CPUTLBEntry *te;
2030 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2031 0f459d16 pbrook
    target_phys_addr_t iotlb;
2032 9fa3e853 bellard
2033 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2034 9fa3e853 bellard
    if (!p) {
2035 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
2036 9fa3e853 bellard
    } else {
2037 9fa3e853 bellard
        pd = p->phys_offset;
2038 9fa3e853 bellard
    }
2039 9fa3e853 bellard
#if defined(DEBUG_TLB)
2040 6ebbf390 j_mayer
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2041 6ebbf390 j_mayer
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2042 9fa3e853 bellard
#endif
2043 9fa3e853 bellard
2044 9fa3e853 bellard
    ret = 0;
2045 0f459d16 pbrook
    address = vaddr;
2046 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2047 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2048 0f459d16 pbrook
        address |= TLB_MMIO;
2049 0f459d16 pbrook
    }
2050 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2051 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2052 0f459d16 pbrook
        /* Normal RAM.  */
2053 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2054 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2055 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
2056 0f459d16 pbrook
        else
2057 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
2058 0f459d16 pbrook
    } else {
2059 ccbb4d44 Stuart Brady
        /* IO handlers are currently passed a physical address.
2060 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2061 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2062 0f459d16 pbrook
           and avoid full address decoding in every device.
2063 0f459d16 pbrook
           We can't use the high bits of pd for this because
2064 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2065 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2066 8da3ff18 pbrook
        if (p) {
2067 8da3ff18 pbrook
            iotlb += p->region_offset;
2068 8da3ff18 pbrook
        } else {
2069 8da3ff18 pbrook
            iotlb += paddr;
2070 8da3ff18 pbrook
        }
2071 0f459d16 pbrook
    }
2072 0f459d16 pbrook
2073 0f459d16 pbrook
    code_address = address;
2074 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2075 0f459d16 pbrook
       watchpoint trap routines.  */
2076 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2077 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2078 0f459d16 pbrook
            iotlb = io_mem_watch + paddr;
2079 0f459d16 pbrook
            /* TODO: The memory case can be optimized by not trapping
2080 0f459d16 pbrook
               reads of pages with a write breakpoint.  */
2081 0f459d16 pbrook
            address |= TLB_MMIO;
2082 6658ffb8 pbrook
        }
2083 0f459d16 pbrook
    }
2084 d79acba4 balrog
2085 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2086 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2087 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2088 0f459d16 pbrook
    te->addend = addend - vaddr;
2089 0f459d16 pbrook
    if (prot & PAGE_READ) {
2090 0f459d16 pbrook
        te->addr_read = address;
2091 0f459d16 pbrook
    } else {
2092 0f459d16 pbrook
        te->addr_read = -1;
2093 0f459d16 pbrook
    }
2094 5c751e99 edgar_igl
2095 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2096 0f459d16 pbrook
        te->addr_code = code_address;
2097 0f459d16 pbrook
    } else {
2098 0f459d16 pbrook
        te->addr_code = -1;
2099 0f459d16 pbrook
    }
2100 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2101 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2102 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2103 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2104 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2105 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2106 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2107 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2108 9fa3e853 bellard
        } else {
2109 0f459d16 pbrook
            te->addr_write = address;
2110 9fa3e853 bellard
        }
2111 0f459d16 pbrook
    } else {
2112 0f459d16 pbrook
        te->addr_write = -1;
2113 9fa3e853 bellard
    }
2114 9fa3e853 bellard
    return ret;
2115 9fa3e853 bellard
}
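/* Worked example of the encoding above (sketch): for a page of ordinary RAM
   whose dirty flags are still set, mapped with prot = PAGE_READ | PAGE_WRITE
   and with no watchpoint on the page, the code above produces

       address        == vaddr                             (no TLB_MMIO)
       iotlb          == (pd & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY
       te->addr_read  == vaddr
       te->addr_write == vaddr                             (no TLB_NOTDIRTY)
       te->addr_code  == -1                                (no PAGE_EXEC)

   so loads and stores take the fast path until the page is marked clean
   again by cpu_physical_memory_reset_dirty().  */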
2116 9fa3e853 bellard
2117 0124311e bellard
#else
2118 0124311e bellard
2119 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2120 0124311e bellard
{
2121 0124311e bellard
}
2122 0124311e bellard
2123 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2124 0124311e bellard
{
2125 0124311e bellard
}
2126 0124311e bellard
2127 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2128 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
2129 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2130 9fa3e853 bellard
{
2131 9fa3e853 bellard
    return 0;
2132 9fa3e853 bellard
}
2133 0124311e bellard
2134 9fa3e853 bellard
/* dump memory mappings */
2135 9fa3e853 bellard
void page_dump(FILE *f)
2136 33417e70 bellard
{
2137 9fa3e853 bellard
    unsigned long start, end;
2138 9fa3e853 bellard
    int i, j, prot, prot1;
2139 9fa3e853 bellard
    PageDesc *p;
2140 33417e70 bellard
2141 9fa3e853 bellard
    fprintf(f, "%-8s %-8s %-8s %s\n",
2142 9fa3e853 bellard
            "start", "end", "size", "prot");
2143 9fa3e853 bellard
    start = -1;
2144 9fa3e853 bellard
    end = -1;
2145 9fa3e853 bellard
    prot = 0;
2146 9fa3e853 bellard
    for(i = 0; i <= L1_SIZE; i++) {
2147 9fa3e853 bellard
        if (i < L1_SIZE)
2148 9fa3e853 bellard
            p = l1_map[i];
2149 9fa3e853 bellard
        else
2150 9fa3e853 bellard
            p = NULL;
2151 9fa3e853 bellard
        for(j = 0; j < L2_SIZE; j++) {
2152 9fa3e853 bellard
            if (!p)
2153 9fa3e853 bellard
                prot1 = 0;
2154 9fa3e853 bellard
            else
2155 9fa3e853 bellard
                prot1 = p[j].flags;
2156 9fa3e853 bellard
            if (prot1 != prot) {
2157 9fa3e853 bellard
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2158 9fa3e853 bellard
                if (start != -1) {
2159 9fa3e853 bellard
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2160 5fafdf24 ths
                            start, end, end - start,
2161 9fa3e853 bellard
                            prot & PAGE_READ ? 'r' : '-',
2162 9fa3e853 bellard
                            prot & PAGE_WRITE ? 'w' : '-',
2163 9fa3e853 bellard
                            prot & PAGE_EXEC ? 'x' : '-');
2164 9fa3e853 bellard
                }
2165 9fa3e853 bellard
                if (prot1 != 0)
2166 9fa3e853 bellard
                    start = end;
2167 9fa3e853 bellard
                else
2168 9fa3e853 bellard
                    start = -1;
2169 9fa3e853 bellard
                prot = prot1;
2170 9fa3e853 bellard
            }
2171 9fa3e853 bellard
            if (!p)
2172 9fa3e853 bellard
                break;
2173 9fa3e853 bellard
        }
2174 33417e70 bellard
    }
2175 33417e70 bellard
}
2176 33417e70 bellard
2177 53a5960a pbrook
int page_get_flags(target_ulong address)
2178 33417e70 bellard
{
2179 9fa3e853 bellard
    PageDesc *p;
2180 9fa3e853 bellard
2181 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2182 33417e70 bellard
    if (!p)
2183 9fa3e853 bellard
        return 0;
2184 9fa3e853 bellard
    return p->flags;
2185 9fa3e853 bellard
}
2186 9fa3e853 bellard
2187 9fa3e853 bellard
/* modify the flags of a page and invalidate the code if
2188 ccbb4d44 Stuart Brady
   necessary. The flag PAGE_WRITE_ORG is set automatically
2189 9fa3e853 bellard
   depending on PAGE_WRITE */
2190 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2191 9fa3e853 bellard
{
2192 9fa3e853 bellard
    PageDesc *p;
2193 53a5960a pbrook
    target_ulong addr;
2194 9fa3e853 bellard
2195 c8a706fe pbrook
    /* mmap_lock should already be held.  */
2196 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2197 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2198 9fa3e853 bellard
    if (flags & PAGE_WRITE)
2199 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2200 9fa3e853 bellard
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2201 9fa3e853 bellard
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2202 17e2377a pbrook
        /* We may be called for host regions that are outside guest
2203 17e2377a pbrook
           address space.  */
2204 17e2377a pbrook
        if (!p)
2205 17e2377a pbrook
            return;
2206 9fa3e853 bellard
        /* if the write protection is set, then we invalidate the code
2207 9fa3e853 bellard
           inside */
2208 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2209 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2210 9fa3e853 bellard
            p->first_tb) {
2211 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2212 9fa3e853 bellard
        }
2213 9fa3e853 bellard
        p->flags = flags;
2214 9fa3e853 bellard
    }
2215 33417e70 bellard
}
2216 33417e70 bellard
2217 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2218 3d97b40b ths
{
2219 3d97b40b ths
    PageDesc *p;
2220 3d97b40b ths
    target_ulong end;
2221 3d97b40b ths
    target_ulong addr;
2222 3d97b40b ths
2223 55f280c9 balrog
    if (start + len < start)
2224 55f280c9 balrog
        /* we've wrapped around */
2225 55f280c9 balrog
        return -1;
2226 55f280c9 balrog
2227 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2228 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2229 3d97b40b ths
2230 3d97b40b ths
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2231 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2232 3d97b40b ths
        if( !p )
2233 3d97b40b ths
            return -1;
2234 3d97b40b ths
        if( !(p->flags & PAGE_VALID) )
2235 3d97b40b ths
            return -1;
2236 3d97b40b ths
2237 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2238 3d97b40b ths
            return -1;
2239 dae3270c bellard
        if (flags & PAGE_WRITE) {
2240 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2241 dae3270c bellard
                return -1;
2242 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2243 dae3270c bellard
               contains translated code */
2244 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2245 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2246 dae3270c bellard
                    return -1;
2247 dae3270c bellard
            }
2248 dae3270c bellard
            return 0;
2249 dae3270c bellard
        }
2250 3d97b40b ths
    }
2251 3d97b40b ths
    return 0;
2252 3d97b40b ths
}
2253 3d97b40b ths
2254 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2255 ccbb4d44 Stuart Brady
   page. Return TRUE if the fault was successfully handled. */
2256 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2257 9fa3e853 bellard
{
2258 9fa3e853 bellard
    unsigned int page_index, prot, pindex;
2259 9fa3e853 bellard
    PageDesc *p, *p1;
2260 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2261 9fa3e853 bellard
2262 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2263 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2264 c8a706fe pbrook
       practice it seems to be ok.  */
2265 c8a706fe pbrook
    mmap_lock();
2266 c8a706fe pbrook
2267 83fb7adf bellard
    host_start = address & qemu_host_page_mask;
2268 9fa3e853 bellard
    page_index = host_start >> TARGET_PAGE_BITS;
2269 9fa3e853 bellard
    p1 = page_find(page_index);
2270 c8a706fe pbrook
    if (!p1) {
2271 c8a706fe pbrook
        mmap_unlock();
2272 9fa3e853 bellard
        return 0;
2273 c8a706fe pbrook
    }
2274 83fb7adf bellard
    host_end = host_start + qemu_host_page_size;
2275 9fa3e853 bellard
    p = p1;
2276 9fa3e853 bellard
    prot = 0;
2277 9fa3e853 bellard
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2278 9fa3e853 bellard
        prot |= p->flags;
2279 9fa3e853 bellard
        p++;
2280 9fa3e853 bellard
    }
2281 9fa3e853 bellard
    /* if the page was really writable, then we change its
2282 9fa3e853 bellard
       protection back to writable */
2283 9fa3e853 bellard
    if (prot & PAGE_WRITE_ORG) {
2284 9fa3e853 bellard
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2285 9fa3e853 bellard
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2286 5fafdf24 ths
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2287 9fa3e853 bellard
                     (prot & PAGE_BITS) | PAGE_WRITE);
2288 9fa3e853 bellard
            p1[pindex].flags |= PAGE_WRITE;
2289 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2290 9fa3e853 bellard
               the corresponding translated code. */
2291 d720b93d bellard
            tb_invalidate_phys_page(address, pc, puc);
2292 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2293 9fa3e853 bellard
            tb_invalidate_check(address);
2294 9fa3e853 bellard
#endif
2295 c8a706fe pbrook
            mmap_unlock();
2296 9fa3e853 bellard
            return 1;
2297 9fa3e853 bellard
        }
2298 9fa3e853 bellard
    }
2299 c8a706fe pbrook
    mmap_unlock();
2300 9fa3e853 bellard
    return 0;
2301 9fa3e853 bellard
}
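/* Illustrative sketch only (the exact caller shape is an assumption): the
   host SEGV path that invokes page_unprotect().  h2g() converts the faulting
   host address back to a guest virtual address. */
static int example_handle_protected_write(unsigned long host_addr,
                                          unsigned long pc, void *puc)
{
    if (page_unprotect(h2g(host_addr), pc, puc)) {
        /* the fault came from our own write protection: retry the access */
        return 1;
    }
    /* otherwise it is a genuine guest fault */
    return 0;
}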
2302 9fa3e853 bellard
2303 6a00d601 bellard
static inline void tlb_set_dirty(CPUState *env,
2304 6a00d601 bellard
                                 unsigned long addr, target_ulong vaddr)
2305 1ccde1cb bellard
{
2306 1ccde1cb bellard
}
2307 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2308 9fa3e853 bellard
2309 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2310 8da3ff18 pbrook
2311 db7b5426 blueswir1
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2312 8da3ff18 pbrook
                             ram_addr_t memory, ram_addr_t region_offset);
2313 00f82b8a aurel32
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2314 8da3ff18 pbrook
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2315 db7b5426 blueswir1
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2316 db7b5426 blueswir1
                      need_subpage)                                     \
2317 db7b5426 blueswir1
    do {                                                                \
2318 db7b5426 blueswir1
        if (addr > start_addr)                                          \
2319 db7b5426 blueswir1
            start_addr2 = 0;                                            \
2320 db7b5426 blueswir1
        else {                                                          \
2321 db7b5426 blueswir1
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2322 db7b5426 blueswir1
            if (start_addr2 > 0)                                        \
2323 db7b5426 blueswir1
                need_subpage = 1;                                       \
2324 db7b5426 blueswir1
        }                                                               \
2325 db7b5426 blueswir1
                                                                        \
2326 49e9fba2 blueswir1
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2327 db7b5426 blueswir1
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2328 db7b5426 blueswir1
        else {                                                          \
2329 db7b5426 blueswir1
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2330 db7b5426 blueswir1
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2331 db7b5426 blueswir1
                need_subpage = 1;                                       \
2332 db7b5426 blueswir1
        }                                                               \
2333 db7b5426 blueswir1
    } while (0)
2334 db7b5426 blueswir1
2335 33417e70 bellard
/* register physical memory. 'size' must be a multiple of the target
2336 33417e70 bellard
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2337 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2338 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2339 ccbb4d44 Stuart Brady
   start_addr and region_offset are rounded down to a page boundary
2340 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2341 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
2342 8da3ff18 pbrook
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2343 8da3ff18 pbrook
                                         ram_addr_t size,
2344 8da3ff18 pbrook
                                         ram_addr_t phys_offset,
2345 8da3ff18 pbrook
                                         ram_addr_t region_offset)
2346 33417e70 bellard
{
2347 108c49b8 bellard
    target_phys_addr_t addr, end_addr;
2348 92e873b9 bellard
    PhysPageDesc *p;
2349 9d42037b bellard
    CPUState *env;
2350 00f82b8a aurel32
    ram_addr_t orig_size = size;
2351 db7b5426 blueswir1
    void *subpage;
2352 33417e70 bellard
2353 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2354 da260249 bellard
    /* XXX: should not depend on cpu context */
2355 da260249 bellard
    env = first_cpu;
2356 da260249 bellard
    if (env->kqemu_enabled) {
2357 da260249 bellard
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2358 da260249 bellard
    }
2359 da260249 bellard
#endif
2360 7ba1e619 aliguori
    if (kvm_enabled())
2361 7ba1e619 aliguori
        kvm_set_phys_mem(start_addr, size, phys_offset);
2362 7ba1e619 aliguori
2363 67c4d23c pbrook
    if (phys_offset == IO_MEM_UNASSIGNED) {
2364 67c4d23c pbrook
        region_offset = start_addr;
2365 67c4d23c pbrook
    }
2366 8da3ff18 pbrook
    region_offset &= TARGET_PAGE_MASK;
2367 5fd386f6 bellard
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2368 49e9fba2 blueswir1
    end_addr = start_addr + (target_phys_addr_t)size;
2369 49e9fba2 blueswir1
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2370 db7b5426 blueswir1
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2371 db7b5426 blueswir1
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2372 00f82b8a aurel32
            ram_addr_t orig_memory = p->phys_offset;
2373 db7b5426 blueswir1
            target_phys_addr_t start_addr2, end_addr2;
2374 db7b5426 blueswir1
            int need_subpage = 0;
2375 db7b5426 blueswir1
2376 db7b5426 blueswir1
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2377 db7b5426 blueswir1
                          need_subpage);
2378 4254fab8 blueswir1
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2379 db7b5426 blueswir1
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2380 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2381 8da3ff18 pbrook
                                           &p->phys_offset, orig_memory,
2382 8da3ff18 pbrook
                                           p->region_offset);
2383 db7b5426 blueswir1
                } else {
2384 db7b5426 blueswir1
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2385 db7b5426 blueswir1
                                            >> IO_MEM_SHIFT];
2386 db7b5426 blueswir1
                }
2387 8da3ff18 pbrook
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2388 8da3ff18 pbrook
                                 region_offset);
2389 8da3ff18 pbrook
                p->region_offset = 0;
2390 db7b5426 blueswir1
            } else {
2391 db7b5426 blueswir1
                p->phys_offset = phys_offset;
2392 db7b5426 blueswir1
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2393 db7b5426 blueswir1
                    (phys_offset & IO_MEM_ROMD))
2394 db7b5426 blueswir1
                    phys_offset += TARGET_PAGE_SIZE;
2395 db7b5426 blueswir1
            }
2396 db7b5426 blueswir1
        } else {
2397 db7b5426 blueswir1
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2398 db7b5426 blueswir1
            p->phys_offset = phys_offset;
2399 8da3ff18 pbrook
            p->region_offset = region_offset;
2400 db7b5426 blueswir1
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2401 8da3ff18 pbrook
                (phys_offset & IO_MEM_ROMD)) {
2402 db7b5426 blueswir1
                phys_offset += TARGET_PAGE_SIZE;
2403 0e8f0967 pbrook
            } else {
2404 db7b5426 blueswir1
                target_phys_addr_t start_addr2, end_addr2;
2405 db7b5426 blueswir1
                int need_subpage = 0;
2406 db7b5426 blueswir1
2407 db7b5426 blueswir1
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2408 db7b5426 blueswir1
                              end_addr2, need_subpage);
2409 db7b5426 blueswir1
2410 4254fab8 blueswir1
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2411 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2412 8da3ff18 pbrook
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2413 67c4d23c pbrook
                                           addr & TARGET_PAGE_MASK);
2414 db7b5426 blueswir1
                    subpage_register(subpage, start_addr2, end_addr2,
2415 8da3ff18 pbrook
                                     phys_offset, region_offset);
2416 8da3ff18 pbrook
                    p->region_offset = 0;
2417 db7b5426 blueswir1
                }
2418 db7b5426 blueswir1
            }
2419 db7b5426 blueswir1
        }
2420 8da3ff18 pbrook
        region_offset += TARGET_PAGE_SIZE;
2421 33417e70 bellard
    }
2422 3b46e624 ths
2423 9d42037b bellard
    /* since each CPU stores ram addresses in its TLB cache, we must
2424 9d42037b bellard
       reset the modified entries */
2425 9d42037b bellard
    /* XXX: slow ! */
2426 9d42037b bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2427 9d42037b bellard
        tlb_flush(env, 1);
2428 9d42037b bellard
    }
2429 33417e70 bellard
}
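/* A minimal illustrative sketch (hypothetical addresses, sizes and
   example_io_index, the latter being a value previously returned by
   cpu_register_io_memory()): typical board code using the API above. */
static void example_register_board_memory(int example_io_index)
{
    /* 64KB of RAM at physical address 0, backed by a freshly allocated block */
    ram_addr_t ram_offset = qemu_ram_alloc(0x10000);
    cpu_register_physical_memory_offset(0x00000000, 0x10000,
                                        ram_offset | IO_MEM_RAM, 0);

    /* one page of MMIO; the handlers see the offset from the region start */
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
                                        example_io_index, 0);
}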
2430 33417e70 bellard
2431 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2432 00f82b8a aurel32
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2433 ba863458 bellard
{
2434 ba863458 bellard
    PhysPageDesc *p;
2435 ba863458 bellard
2436 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2437 ba863458 bellard
    if (!p)
2438 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2439 ba863458 bellard
    return p->phys_offset;
2440 ba863458 bellard
}
2441 ba863458 bellard
2442 f65ed4c1 aliguori
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2443 f65ed4c1 aliguori
{
2444 f65ed4c1 aliguori
    if (kvm_enabled())
2445 f65ed4c1 aliguori
        kvm_coalesce_mmio_region(addr, size);
2446 f65ed4c1 aliguori
}
2447 f65ed4c1 aliguori
2448 f65ed4c1 aliguori
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2449 f65ed4c1 aliguori
{
2450 f65ed4c1 aliguori
    if (kvm_enabled())
2451 f65ed4c1 aliguori
        kvm_uncoalesce_mmio_region(addr, size);
2452 f65ed4c1 aliguori
}
2453 f65ed4c1 aliguori
2454 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2455 e9a1ab19 bellard
/* XXX: better than nothing */
2456 94a6b54f pbrook
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2457 e9a1ab19 bellard
{
2458 e9a1ab19 bellard
    ram_addr_t addr;
2459 94a6b54f pbrook
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2460 012a7045 ths
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2461 94a6b54f pbrook
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2462 e9a1ab19 bellard
        abort();
2463 e9a1ab19 bellard
    }
2464 94a6b54f pbrook
    addr = last_ram_offset;
2465 94a6b54f pbrook
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2466 e9a1ab19 bellard
    return addr;
2467 e9a1ab19 bellard
}
2468 94a6b54f pbrook
#endif
2469 94a6b54f pbrook
2470 94a6b54f pbrook
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2471 94a6b54f pbrook
{
2472 94a6b54f pbrook
    RAMBlock *new_block;
2473 94a6b54f pbrook
2474 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2475 94a6b54f pbrook
    if (kqemu_phys_ram_base) {
2476 94a6b54f pbrook
        return kqemu_ram_alloc(size);
2477 94a6b54f pbrook
    }
2478 94a6b54f pbrook
#endif
2479 94a6b54f pbrook
2480 94a6b54f pbrook
    size = TARGET_PAGE_ALIGN(size);
2481 94a6b54f pbrook
    new_block = qemu_malloc(sizeof(*new_block));
2482 94a6b54f pbrook
2483 94a6b54f pbrook
    new_block->host = qemu_vmalloc(size);
2484 94a6b54f pbrook
    new_block->offset = last_ram_offset;
2485 94a6b54f pbrook
    new_block->length = size;
2486 94a6b54f pbrook
2487 94a6b54f pbrook
    new_block->next = ram_blocks;
2488 94a6b54f pbrook
    ram_blocks = new_block;
2489 94a6b54f pbrook
2490 94a6b54f pbrook
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2491 94a6b54f pbrook
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2492 94a6b54f pbrook
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2493 94a6b54f pbrook
           0xff, size >> TARGET_PAGE_BITS);
2494 94a6b54f pbrook
2495 94a6b54f pbrook
    last_ram_offset += size;
2496 94a6b54f pbrook
2497 6f0437e8 Jan Kiszka
    if (kvm_enabled())
2498 6f0437e8 Jan Kiszka
        kvm_setup_guest_memory(new_block->host, size);
2499 6f0437e8 Jan Kiszka
2500 94a6b54f pbrook
    return new_block->offset;
2501 94a6b54f pbrook
}
2502 e9a1ab19 bellard
2503 e9a1ab19 bellard
void qemu_ram_free(ram_addr_t addr)
2504 e9a1ab19 bellard
{
2505 94a6b54f pbrook
    /* TODO: implement this.  */
2506 e9a1ab19 bellard
}
2507 e9a1ab19 bellard
2508 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2509 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2510 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
2511 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
2512 5579c7f3 pbrook

2513 5579c7f3 pbrook
   It should not be used for general purpose DMA.
2514 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2515 5579c7f3 pbrook
 */
2516 dc828ca1 pbrook
void *qemu_get_ram_ptr(ram_addr_t addr)
2517 dc828ca1 pbrook
{
2518 94a6b54f pbrook
    RAMBlock *prev;
2519 94a6b54f pbrook
    RAMBlock **prevp;
2520 94a6b54f pbrook
    RAMBlock *block;
2521 94a6b54f pbrook
2522 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2523 94a6b54f pbrook
    if (kqemu_phys_ram_base) {
2524 94a6b54f pbrook
        return kqemu_phys_ram_base + addr;
2525 94a6b54f pbrook
    }
2526 94a6b54f pbrook
#endif
2527 94a6b54f pbrook
2528 94a6b54f pbrook
    prev = NULL;
2529 94a6b54f pbrook
    prevp = &ram_blocks;
2530 94a6b54f pbrook
    block = ram_blocks;
2531 94a6b54f pbrook
    while (block && (block->offset > addr
2532 94a6b54f pbrook
                     || block->offset + block->length <= addr)) {
2533 94a6b54f pbrook
        if (prev)
2534 94a6b54f pbrook
          prevp = &prev->next;
2535 94a6b54f pbrook
        prev = block;
2536 94a6b54f pbrook
        block = block->next;
2537 94a6b54f pbrook
    }
2538 94a6b54f pbrook
    if (!block) {
2539 94a6b54f pbrook
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2540 94a6b54f pbrook
        abort();
2541 94a6b54f pbrook
    }
2542 94a6b54f pbrook
    /* Move this entry to the start of the list.  */
2543 94a6b54f pbrook
    if (prev) {
2544 94a6b54f pbrook
        prev->next = block->next;
2545 94a6b54f pbrook
        block->next = *prevp;
2546 94a6b54f pbrook
        *prevp = block;
2547 94a6b54f pbrook
    }
2548 94a6b54f pbrook
    return block->host + (addr - block->offset);
2549 dc828ca1 pbrook
}
2550 dc828ca1 pbrook
2551 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
2552 5579c7f3 pbrook
   (typically a TLB entry) back to a ram offset.  */
2553 5579c7f3 pbrook
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2554 5579c7f3 pbrook
{
2555 94a6b54f pbrook
    RAMBlock *prev;
2556 94a6b54f pbrook
    RAMBlock **prevp;
2557 94a6b54f pbrook
    RAMBlock *block;
2558 94a6b54f pbrook
    uint8_t *host = ptr;
2559 94a6b54f pbrook
2560 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2561 94a6b54f pbrook
    if (kqemu_phys_ram_base) {
2562 94a6b54f pbrook
        return host - kqemu_phys_ram_base;
2563 94a6b54f pbrook
    }
2564 94a6b54f pbrook
#endif
2565 94a6b54f pbrook
2566 94a6b54f pbrook
    prev = NULL;
2567 94a6b54f pbrook
    prevp = &ram_blocks;
2568 94a6b54f pbrook
    block = ram_blocks;
2569 94a6b54f pbrook
    while (block && (block->host > host
2570 94a6b54f pbrook
                     || block->host + block->length <= host)) {
2571 94a6b54f pbrook
        if (prev)
2572 94a6b54f pbrook
          prevp = &prev->next;
2573 94a6b54f pbrook
        prev = block;
2574 94a6b54f pbrook
        block = block->next;
2575 94a6b54f pbrook
    }
2576 94a6b54f pbrook
    if (!block) {
2577 94a6b54f pbrook
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2578 94a6b54f pbrook
        abort();
2579 94a6b54f pbrook
    }
2580 94a6b54f pbrook
    return block->offset + (host - block->host);
2581 5579c7f3 pbrook
}
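/* A minimal illustrative sketch (hypothetical vram_size): the allocate and
   translate round trip a device with private RAM (e.g. video memory) might
   perform with the helpers above. */
static void example_ram_block_roundtrip(ram_addr_t vram_size)
{
    ram_addr_t offset = qemu_ram_alloc(vram_size);
    uint8_t *host = qemu_get_ram_ptr(offset);

    /* translating the host pointer back yields the original ram offset */
    if (qemu_ram_addr_from_host(host) != offset) {
        abort();
    }
}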
2582 5579c7f3 pbrook
2583 a4193c8a bellard
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2584 33417e70 bellard
{
2585 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2586 ab3d1727 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2587 67d3b957 pbrook
#endif
2588 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2589 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 1);
2590 e18231a3 blueswir1
#endif
2591 e18231a3 blueswir1
    return 0;
2592 e18231a3 blueswir1
}
2593 e18231a3 blueswir1
2594 e18231a3 blueswir1
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2595 e18231a3 blueswir1
{
2596 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2597 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2598 e18231a3 blueswir1
#endif
2599 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2600 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 2);
2601 e18231a3 blueswir1
#endif
2602 e18231a3 blueswir1
    return 0;
2603 e18231a3 blueswir1
}
2604 e18231a3 blueswir1
2605 e18231a3 blueswir1
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2606 e18231a3 blueswir1
{
2607 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2608 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2609 e18231a3 blueswir1
#endif
2610 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2611 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 4);
2612 b4f0a316 blueswir1
#endif
2613 33417e70 bellard
    return 0;
2614 33417e70 bellard
}
2615 33417e70 bellard
2616 a4193c8a bellard
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2617 33417e70 bellard
{
2618 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2619 ab3d1727 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2620 67d3b957 pbrook
#endif
2621 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2622 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 1);
2623 e18231a3 blueswir1
#endif
2624 e18231a3 blueswir1
}
2625 e18231a3 blueswir1
2626 e18231a3 blueswir1
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2627 e18231a3 blueswir1
{
2628 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2629 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2630 e18231a3 blueswir1
#endif
2631 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2632 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 2);
2633 e18231a3 blueswir1
#endif
2634 e18231a3 blueswir1
}
2635 e18231a3 blueswir1
2636 e18231a3 blueswir1
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2637 e18231a3 blueswir1
{
2638 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2639 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2640 e18231a3 blueswir1
#endif
2641 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2642 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 4);
2643 b4f0a316 blueswir1
#endif
2644 33417e70 bellard
}
2645 33417e70 bellard
2646 33417e70 bellard
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2647 33417e70 bellard
    unassigned_mem_readb,
2648 e18231a3 blueswir1
    unassigned_mem_readw,
2649 e18231a3 blueswir1
    unassigned_mem_readl,
2650 33417e70 bellard
};
2651 33417e70 bellard
2652 33417e70 bellard
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2653 33417e70 bellard
    unassigned_mem_writeb,
2654 e18231a3 blueswir1
    unassigned_mem_writew,
2655 e18231a3 blueswir1
    unassigned_mem_writel,
2656 33417e70 bellard
};
2657 33417e70 bellard
2658 0f459d16 pbrook
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2659 0f459d16 pbrook
                                uint32_t val)
2660 9fa3e853 bellard
{
2661 3a7d929e bellard
    int dirty_flags;
2662 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2663 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2664 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2665 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 1);
2666 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2667 9fa3e853 bellard
#endif
2668 3a7d929e bellard
    }
2669 5579c7f3 pbrook
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2670 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2671 f32fc648 bellard
    if (cpu_single_env->kqemu_enabled &&
2672 f32fc648 bellard
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2673 f32fc648 bellard
        kqemu_modify_page(cpu_single_env, ram_addr);
2674 f32fc648 bellard
#endif
2675 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2676 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2677 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2678 f23db169 bellard
       flushed */
2679 f23db169 bellard
    if (dirty_flags == 0xff)
2680 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2681 9fa3e853 bellard
}
2682 9fa3e853 bellard
2683 0f459d16 pbrook
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2684 0f459d16 pbrook
                                uint32_t val)
2685 9fa3e853 bellard
{
2686 3a7d929e bellard
    int dirty_flags;
2687 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2688 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2689 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2690 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 2);
2691 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2692 9fa3e853 bellard
#endif
2693 3a7d929e bellard
    }
2694 5579c7f3 pbrook
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2695 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2696 f32fc648 bellard
    if (cpu_single_env->kqemu_enabled &&
2697 f32fc648 bellard
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2698 f32fc648 bellard
        kqemu_modify_page(cpu_single_env, ram_addr);
2699 f32fc648 bellard
#endif
2700 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2701 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2702 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2703 f23db169 bellard
       flushed */
2704 f23db169 bellard
    if (dirty_flags == 0xff)
2705 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2706 9fa3e853 bellard
}
2707 9fa3e853 bellard
2708 0f459d16 pbrook
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2709 0f459d16 pbrook
                                uint32_t val)
2710 9fa3e853 bellard
{
2711 3a7d929e bellard
    int dirty_flags;
2712 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2713 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2714 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2715 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
2716 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2717 9fa3e853 bellard
#endif
2718 3a7d929e bellard
    }
2719 5579c7f3 pbrook
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2720 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2721 f32fc648 bellard
    if (cpu_single_env->kqemu_enabled &&
2722 f32fc648 bellard
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2723 f32fc648 bellard
        kqemu_modify_page(cpu_single_env, ram_addr);
2724 f32fc648 bellard
#endif
2725 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2726 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2727 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2728 f23db169 bellard
       flushed */
2729 f23db169 bellard
    if (dirty_flags == 0xff)
2730 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2731 9fa3e853 bellard
}
2732 9fa3e853 bellard
2733 3a7d929e bellard
static CPUReadMemoryFunc *error_mem_read[3] = {
2734 9fa3e853 bellard
    NULL, /* never used */
2735 9fa3e853 bellard
    NULL, /* never used */
2736 9fa3e853 bellard
    NULL, /* never used */
2737 9fa3e853 bellard
};
2738 9fa3e853 bellard
2739 1ccde1cb bellard
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2740 1ccde1cb bellard
    notdirty_mem_writeb,
2741 1ccde1cb bellard
    notdirty_mem_writew,
2742 1ccde1cb bellard
    notdirty_mem_writel,
2743 1ccde1cb bellard
};
2744 1ccde1cb bellard
2745 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
2746 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
2747 0f459d16 pbrook
{
2748 0f459d16 pbrook
    CPUState *env = cpu_single_env;
2749 06d55cc1 aliguori
    target_ulong pc, cs_base;
2750 06d55cc1 aliguori
    TranslationBlock *tb;
2751 0f459d16 pbrook
    target_ulong vaddr;
2752 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2753 06d55cc1 aliguori
    int cpu_flags;
2754 0f459d16 pbrook
2755 06d55cc1 aliguori
    if (env->watchpoint_hit) {
2756 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
2757 06d55cc1 aliguori
         * the debug interrupt so that it will trigger after the
2758 06d55cc1 aliguori
         * current instruction. */
2759 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2760 06d55cc1 aliguori
        return;
2761 06d55cc1 aliguori
    }
2762 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2763 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2764 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
2765 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2766 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
2767 6e140f28 aliguori
            if (!env->watchpoint_hit) {
2768 6e140f28 aliguori
                env->watchpoint_hit = wp;
2769 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
2770 6e140f28 aliguori
                if (!tb) {
2771 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2772 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
2773 6e140f28 aliguori
                }
2774 6e140f28 aliguori
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2775 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
2776 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2777 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
2778 6e140f28 aliguori
                } else {
2779 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2780 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2781 6e140f28 aliguori
                }
2782 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
2783 06d55cc1 aliguori
            }
2784 6e140f28 aliguori
        } else {
2785 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
2786 0f459d16 pbrook
        }
2787 0f459d16 pbrook
    }
2788 0f459d16 pbrook
}
2789 0f459d16 pbrook
2790 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2791 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
2792 6658ffb8 pbrook
   phys routines.  */
2793 6658ffb8 pbrook
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2794 6658ffb8 pbrook
{
2795 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2796 6658ffb8 pbrook
    return ldub_phys(addr);
2797 6658ffb8 pbrook
}
2798 6658ffb8 pbrook
2799 6658ffb8 pbrook
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2800 6658ffb8 pbrook
{
2801 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2802 6658ffb8 pbrook
    return lduw_phys(addr);
2803 6658ffb8 pbrook
}
2804 6658ffb8 pbrook
2805 6658ffb8 pbrook
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2806 6658ffb8 pbrook
{
2807 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2808 6658ffb8 pbrook
    return ldl_phys(addr);
2809 6658ffb8 pbrook
}
2810 6658ffb8 pbrook
2811 6658ffb8 pbrook
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2812 6658ffb8 pbrook
                             uint32_t val)
2813 6658ffb8 pbrook
{
2814 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2815 6658ffb8 pbrook
    stb_phys(addr, val);
2816 6658ffb8 pbrook
}
2817 6658ffb8 pbrook
2818 6658ffb8 pbrook
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2819 6658ffb8 pbrook
                             uint32_t val)
2820 6658ffb8 pbrook
{
2821 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2822 6658ffb8 pbrook
    stw_phys(addr, val);
2823 6658ffb8 pbrook
}
2824 6658ffb8 pbrook
2825 6658ffb8 pbrook
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2826 6658ffb8 pbrook
                             uint32_t val)
2827 6658ffb8 pbrook
{
2828 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2829 6658ffb8 pbrook
    stl_phys(addr, val);
2830 6658ffb8 pbrook
}
2831 6658ffb8 pbrook
2832 6658ffb8 pbrook
static CPUReadMemoryFunc *watch_mem_read[3] = {
2833 6658ffb8 pbrook
    watch_mem_readb,
2834 6658ffb8 pbrook
    watch_mem_readw,
2835 6658ffb8 pbrook
    watch_mem_readl,
2836 6658ffb8 pbrook
};
2837 6658ffb8 pbrook
2838 6658ffb8 pbrook
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2839 6658ffb8 pbrook
    watch_mem_writeb,
2840 6658ffb8 pbrook
    watch_mem_writew,
2841 6658ffb8 pbrook
    watch_mem_writel,
2842 6658ffb8 pbrook
};
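/* A minimal illustrative sketch (hypothetical helper): how a watchpoint
   reaches the handlers above.  cpu_watchpoint_insert(), defined earlier in
   this file, makes the TLB route accesses to that page through io_mem_watch
   so that check_watchpoint() runs on every access. */
static void example_watch_guest_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
}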
2843 6658ffb8 pbrook
2844 db7b5426 blueswir1
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2845 db7b5426 blueswir1
                                 unsigned int len)
2846 db7b5426 blueswir1
{
2847 db7b5426 blueswir1
    uint32_t ret;
2848 db7b5426 blueswir1
    unsigned int idx;
2849 db7b5426 blueswir1
2850 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2851 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2852 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2853 db7b5426 blueswir1
           mmio, len, addr, idx);
2854 db7b5426 blueswir1
#endif
2855 8da3ff18 pbrook
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2856 8da3ff18 pbrook
                                       addr + mmio->region_offset[idx][0][len]);
2857 db7b5426 blueswir1
2858 db7b5426 blueswir1
    return ret;
2859 db7b5426 blueswir1
}
2860 db7b5426 blueswir1
2861 db7b5426 blueswir1
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2862 db7b5426 blueswir1
                              uint32_t value, unsigned int len)
2863 db7b5426 blueswir1
{
2864 db7b5426 blueswir1
    unsigned int idx;
2865 db7b5426 blueswir1
2866 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2867 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2868 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2869 db7b5426 blueswir1
           mmio, len, addr, idx, value);
2870 db7b5426 blueswir1
#endif
2871 8da3ff18 pbrook
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2872 8da3ff18 pbrook
                                  addr + mmio->region_offset[idx][1][len],
2873 8da3ff18 pbrook
                                  value);
2874 db7b5426 blueswir1
}
2875 db7b5426 blueswir1
2876 db7b5426 blueswir1
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2877 db7b5426 blueswir1
{
2878 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2879 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2880 db7b5426 blueswir1
#endif
2881 db7b5426 blueswir1
2882 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 0);
2883 db7b5426 blueswir1
}
2884 db7b5426 blueswir1
2885 db7b5426 blueswir1
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2886 db7b5426 blueswir1
                            uint32_t value)
2887 db7b5426 blueswir1
{
2888 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2889 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2890 db7b5426 blueswir1
#endif
2891 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
2892 db7b5426 blueswir1
}
2893 db7b5426 blueswir1
2894 db7b5426 blueswir1
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2895 db7b5426 blueswir1
{
2896 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2897 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2898 db7b5426 blueswir1
#endif
2899 db7b5426 blueswir1
2900 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 1);
2901 db7b5426 blueswir1
}
2902 db7b5426 blueswir1
2903 db7b5426 blueswir1
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2904 db7b5426 blueswir1
                            uint32_t value)
2905 db7b5426 blueswir1
{
2906 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2907 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2908 db7b5426 blueswir1
#endif
2909 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
2910 db7b5426 blueswir1
}
2911 db7b5426 blueswir1
2912 db7b5426 blueswir1
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2913 db7b5426 blueswir1
{
2914 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2915 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2916 db7b5426 blueswir1
#endif
2917 db7b5426 blueswir1
2918 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 2);
2919 db7b5426 blueswir1
}
2920 db7b5426 blueswir1
2921 db7b5426 blueswir1
static void subpage_writel (void *opaque,
2922 db7b5426 blueswir1
                         target_phys_addr_t addr, uint32_t value)
2923 db7b5426 blueswir1
{
2924 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2925 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2926 db7b5426 blueswir1
#endif
2927 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
2928 db7b5426 blueswir1
}
2929 db7b5426 blueswir1
2930 db7b5426 blueswir1
static CPUReadMemoryFunc *subpage_read[] = {
2931 db7b5426 blueswir1
    &subpage_readb,
2932 db7b5426 blueswir1
    &subpage_readw,
2933 db7b5426 blueswir1
    &subpage_readl,
2934 db7b5426 blueswir1
};
2935 db7b5426 blueswir1
2936 db7b5426 blueswir1
static CPUWriteMemoryFunc *subpage_write[] = {
2937 db7b5426 blueswir1
    &subpage_writeb,
2938 db7b5426 blueswir1
    &subpage_writew,
2939 db7b5426 blueswir1
    &subpage_writel,
2940 db7b5426 blueswir1
};
2941 db7b5426 blueswir1
2942 db7b5426 blueswir1
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2943 8da3ff18 pbrook
                             ram_addr_t memory, ram_addr_t region_offset)
2944 db7b5426 blueswir1
{
2945 db7b5426 blueswir1
    int idx, eidx;
2946 4254fab8 blueswir1
    unsigned int i;
2947 db7b5426 blueswir1
2948 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2949 db7b5426 blueswir1
        return -1;
2950 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
2951 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
2952 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2953 db7b5426 blueswir1
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2954 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
2955 db7b5426 blueswir1
#endif
2956 db7b5426 blueswir1
    memory >>= IO_MEM_SHIFT;
2957 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
2958 4254fab8 blueswir1
        for (i = 0; i < 4; i++) {
2959 3ee89922 blueswir1
            if (io_mem_read[memory][i]) {
2960 3ee89922 blueswir1
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2961 3ee89922 blueswir1
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2962 8da3ff18 pbrook
                mmio->region_offset[idx][0][i] = region_offset;
2963 3ee89922 blueswir1
            }
2964 3ee89922 blueswir1
            if (io_mem_write[memory][i]) {
2965 3ee89922 blueswir1
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2966 3ee89922 blueswir1
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2967 8da3ff18 pbrook
                mmio->region_offset[idx][1][i] = region_offset;
2968 3ee89922 blueswir1
            }
2969 4254fab8 blueswir1
        }
2970 db7b5426 blueswir1
    }
2971 db7b5426 blueswir1
2972 db7b5426 blueswir1
    return 0;
2973 db7b5426 blueswir1
}
2974 db7b5426 blueswir1
2975 00f82b8a aurel32
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2976 8da3ff18 pbrook
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2977 db7b5426 blueswir1
{
2978 db7b5426 blueswir1
    subpage_t *mmio;
2979 db7b5426 blueswir1
    int subpage_memory;
2980 db7b5426 blueswir1
2981 db7b5426 blueswir1
    mmio = qemu_mallocz(sizeof(subpage_t));
2982 1eec614b aliguori
2983 1eec614b aliguori
    mmio->base = base;
2984 1eec614b aliguori
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2985 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2986 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2987 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2988 db7b5426 blueswir1
#endif
2989 1eec614b aliguori
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2990 1eec614b aliguori
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2991 8da3ff18 pbrook
                         region_offset);
2992 db7b5426 blueswir1
2993 db7b5426 blueswir1
    return mmio;
2994 db7b5426 blueswir1
}
2995 db7b5426 blueswir1
2996 88715657 aliguori
static int get_free_io_mem_idx(void)
2997 88715657 aliguori
{
2998 88715657 aliguori
    int i;
2999 88715657 aliguori
3000 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3001 88715657 aliguori
        if (!io_mem_used[i]) {
3002 88715657 aliguori
            io_mem_used[i] = 1;
3003 88715657 aliguori
            return i;
3004 88715657 aliguori
        }
3005 88715657 aliguori
3006 88715657 aliguori
    return -1;
3007 88715657 aliguori
}
3008 88715657 aliguori
3009 33417e70 bellard
static void io_mem_init(void)
3010 33417e70 bellard
{
3011 88715657 aliguori
    int i;
3012 88715657 aliguori
3013 3a7d929e bellard
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3014 a4193c8a bellard
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3015 3a7d929e bellard
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3016 88715657 aliguori
    for (i=0; i<5; i++)
3017 88715657 aliguori
        io_mem_used[i] = 1;
3018 1ccde1cb bellard
3019 0f459d16 pbrook
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3020 6658ffb8 pbrook
                                          watch_mem_write, NULL);
3021 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
3022 94a6b54f pbrook
    if (kqemu_phys_ram_base) {
3023 94a6b54f pbrook
        /* alloc dirty bits array */
3024 94a6b54f pbrook
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3025 94a6b54f pbrook
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3026 94a6b54f pbrook
    }
3027 94a6b54f pbrook
#endif
3028 33417e70 bellard
}
3029 33417e70 bellard
3030 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3031 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3032 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3033 3ee89922 blueswir1
   If io_index is non-zero, the corresponding io zone is
3034 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3035 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(); -1 is
3036 4254fab8 blueswir1
   returned on error. */
3037 33417e70 bellard
int cpu_register_io_memory(int io_index,
3038 33417e70 bellard
                           CPUReadMemoryFunc **mem_read,
3039 a4193c8a bellard
                           CPUWriteMemoryFunc **mem_write,
3040 a4193c8a bellard
                           void *opaque)
3041 33417e70 bellard
{
3042 4254fab8 blueswir1
    int i, subwidth = 0;
3043 33417e70 bellard
3044 33417e70 bellard
    if (io_index <= 0) {
3045 88715657 aliguori
        io_index = get_free_io_mem_idx();
3046 88715657 aliguori
        if (io_index == -1)
3047 88715657 aliguori
            return io_index;
3048 33417e70 bellard
    } else {
3049 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3050 33417e70 bellard
            return -1;
3051 33417e70 bellard
    }
3052 b5ff1b31 bellard
3053 33417e70 bellard
    for(i = 0;i < 3; i++) {
3054 4254fab8 blueswir1
        if (!mem_read[i] || !mem_write[i])
3055 4254fab8 blueswir1
            subwidth = IO_MEM_SUBWIDTH;
3056 33417e70 bellard
        io_mem_read[io_index][i] = mem_read[i];
3057 33417e70 bellard
        io_mem_write[io_index][i] = mem_write[i];
3058 33417e70 bellard
    }
3059 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3060 4254fab8 blueswir1
    return (io_index << IO_MEM_SHIFT) | subwidth;
3061 33417e70 bellard
}
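/* A minimal illustrative sketch (hypothetical example_* handlers): a device
   registering MMIO with the API documented above. */
static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void example_writel(void *opaque, target_phys_addr_t addr,
                           uint32_t val)
{
}

static CPUReadMemoryFunc *example_read[3] = {
    NULL,           /* omitted byte handler: region becomes IO_MEM_SUBWIDTH */
    NULL,           /* omitted word handler */
    example_readl,
};

static CPUWriteMemoryFunc *example_write[3] = {
    NULL,
    NULL,
    example_writel,
};

static void example_register_device(void)
{
    /* io_index 0 requests a fresh slot; the return value is what gets
       passed to cpu_register_physical_memory() for the device's pages */
    int io_index = cpu_register_io_memory(0, example_read, example_write,
                                          NULL);
    (void)io_index;
}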
3062 61382a50 bellard
3063 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3064 88715657 aliguori
{
3065 88715657 aliguori
    int i;
3066 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3067 88715657 aliguori
3068 88715657 aliguori
    for (i=0;i < 3; i++) {
3069 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3070 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3071 88715657 aliguori
    }
3072 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3073 88715657 aliguori
    io_mem_used[io_index] = 0;
3074 88715657 aliguori
}
3075 88715657 aliguori
3076 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3077 e2eef170 pbrook
3078 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3079 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3080 5fafdf24 ths
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3081 13eb76e0 bellard
                            int len, int is_write)
3082 13eb76e0 bellard
{
3083 13eb76e0 bellard
    int l, flags;
3084 13eb76e0 bellard
    target_ulong page;
3085 53a5960a pbrook
    void * p;
3086 13eb76e0 bellard
3087 13eb76e0 bellard
    while (len > 0) {
3088 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3089 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3090 13eb76e0 bellard
        if (l > len)
3091 13eb76e0 bellard
            l = len;
3092 13eb76e0 bellard
        flags = page_get_flags(page);
3093 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
3094 13eb76e0 bellard
            return;
3095 13eb76e0 bellard
        if (is_write) {
3096 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3097 13eb76e0 bellard
                return;
3098 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3099 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3100 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
3101 579a97f7 bellard
                return;
3102 72fb7daa aurel32
            memcpy(p, buf, l);
3103 72fb7daa aurel32
            unlock_user(p, addr, l);
3104 13eb76e0 bellard
        } else {
3105 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3106 13eb76e0 bellard
                return;
3107 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3108 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3109 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
3110 579a97f7 bellard
                return;
3111 72fb7daa aurel32
            memcpy(buf, p, l);
3112 5b257578 aurel32
            unlock_user(p, addr, 0);
3113 13eb76e0 bellard
        }
3114 13eb76e0 bellard
        len -= l;
3115 13eb76e0 bellard
        buf += l;
3116 13eb76e0 bellard
        addr += l;
3117 13eb76e0 bellard
    }
3118 13eb76e0 bellard
}
3119 8df1cd07 bellard
3120 13eb76e0 bellard
#else
3121 5fafdf24 ths
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3122 13eb76e0 bellard
                            int len, int is_write)
3123 13eb76e0 bellard
{
3124 13eb76e0 bellard
    int l, io_index;
3125 13eb76e0 bellard
    uint8_t *ptr;
3126 13eb76e0 bellard
    uint32_t val;
3127 2e12669a bellard
    target_phys_addr_t page;
3128 2e12669a bellard
    unsigned long pd;
3129 92e873b9 bellard
    PhysPageDesc *p;
3130 3b46e624 ths
3131 13eb76e0 bellard
    while (len > 0) {
3132 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3133 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3134 13eb76e0 bellard
        if (l > len)
3135 13eb76e0 bellard
            l = len;
3136 92e873b9 bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3137 13eb76e0 bellard
        if (!p) {
3138 13eb76e0 bellard
            pd = IO_MEM_UNASSIGNED;
3139 13eb76e0 bellard
        } else {
3140 13eb76e0 bellard
            pd = p->phys_offset;
3141 13eb76e0 bellard
        }
3142 3b46e624 ths
3143 13eb76e0 bellard
        if (is_write) {
3144 3a7d929e bellard
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3145 6c2934db aurel32
                target_phys_addr_t addr1 = addr;
3146 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3147 8da3ff18 pbrook
                if (p)
3148 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3149 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
3150 6a00d601 bellard
                   potential bugs */
3151 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3152 1c213d19 bellard
                    /* 32 bit write access */
3153 c27004ec bellard
                    val = ldl_p(buf);
3154 6c2934db aurel32
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3155 13eb76e0 bellard
                    l = 4;
3156 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3157 1c213d19 bellard
                    /* 16 bit write access */
3158 c27004ec bellard
                    val = lduw_p(buf);
3159 6c2934db aurel32
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3160 13eb76e0 bellard
                    l = 2;
3161 13eb76e0 bellard
                } else {
3162 1c213d19 bellard
                    /* 8 bit write access */
3163 c27004ec bellard
                    val = ldub_p(buf);
3164 6c2934db aurel32
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3165 13eb76e0 bellard
                    l = 1;
3166 13eb76e0 bellard
                }
3167 13eb76e0 bellard
            } else {
3168 b448f2f3 bellard
                unsigned long addr1;
3169 b448f2f3 bellard
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3170 13eb76e0 bellard
                /* RAM case */
3171 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
3172 13eb76e0 bellard
                memcpy(ptr, buf, l);
3173 3a7d929e bellard
                if (!cpu_physical_memory_is_dirty(addr1)) {
3174 3a7d929e bellard
                    /* invalidate code */
3175 3a7d929e bellard
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3176 3a7d929e bellard
                    /* set dirty bit */
3177 5fafdf24 ths
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3178 f23db169 bellard
                        (0xff & ~CODE_DIRTY_FLAG);
3179 3a7d929e bellard
                }
3180 13eb76e0 bellard
            }
3181 13eb76e0 bellard
        } else {
3182 5fafdf24 ths
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3183 2a4188a3 bellard
                !(pd & IO_MEM_ROMD)) {
3184 6c2934db aurel32
                target_phys_addr_t addr1 = addr;
3185 13eb76e0 bellard
                /* I/O case */
3186 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3187 8da3ff18 pbrook
                if (p)
3188 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3189 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3190 13eb76e0 bellard
                    /* 32 bit read access */
3191 6c2934db aurel32
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3192 c27004ec bellard
                    stl_p(buf, val);
3193 13eb76e0 bellard
                    l = 4;
3194 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3195 13eb76e0 bellard
                    /* 16 bit read access */
3196 6c2934db aurel32
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3197 c27004ec bellard
                    stw_p(buf, val);
3198 13eb76e0 bellard
                    l = 2;
3199 13eb76e0 bellard
                } else {
3200 1c213d19 bellard
                    /* 8 bit read access */
3201 6c2934db aurel32
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3202 c27004ec bellard
                    stb_p(buf, val);
3203 13eb76e0 bellard
                    l = 1;
3204 13eb76e0 bellard
                }
3205 13eb76e0 bellard
            } else {
3206 13eb76e0 bellard
                /* RAM case */
3207 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3208 13eb76e0 bellard
                    (addr & ~TARGET_PAGE_MASK);
3209 13eb76e0 bellard
                memcpy(buf, ptr, l);
3210 13eb76e0 bellard
            }
3211 13eb76e0 bellard
        }
3212 13eb76e0 bellard
        len -= l;
3213 13eb76e0 bellard
        buf += l;
3214 13eb76e0 bellard
        addr += l;
3215 13eb76e0 bellard
    }
3216 13eb76e0 bellard
}
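/* A minimal illustrative sketch (hypothetical helper and address): a typical
   debug-style access through the API above, e.g. from a monitor command. */
static void example_peek_and_poke(target_phys_addr_t addr)
{
    uint8_t buf[4];

    /* is_write = 0: read four bytes of guest physical memory */
    cpu_physical_memory_rw(addr, buf, 4, 0);
    /* writes go through the same entry point with is_write = 1 */
    cpu_physical_memory_rw(addr, buf, 4, 1);
}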
3217 8df1cd07 bellard
3218 d0ecd2aa bellard
/* used for ROM loading: can write to both RAM and ROM */
3219 5fafdf24 ths
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3220 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3221 d0ecd2aa bellard
{
3222 d0ecd2aa bellard
    int l;
3223 d0ecd2aa bellard
    uint8_t *ptr;
3224 d0ecd2aa bellard
    target_phys_addr_t page;
3225 d0ecd2aa bellard
    unsigned long pd;
3226 d0ecd2aa bellard
    PhysPageDesc *p;
3227 3b46e624 ths
3228 d0ecd2aa bellard
    while (len > 0) {
3229 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3230 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3231 d0ecd2aa bellard
        if (l > len)
3232 d0ecd2aa bellard
            l = len;
3233 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3234 d0ecd2aa bellard
        if (!p) {
3235 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3236 d0ecd2aa bellard
        } else {
3237 d0ecd2aa bellard
            pd = p->phys_offset;
3238 d0ecd2aa bellard
        }
3239 3b46e624 ths
3240 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3241 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3242 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3243 d0ecd2aa bellard
            /* do nothing */
3244 d0ecd2aa bellard
        } else {
3245 d0ecd2aa bellard
            unsigned long addr1;
3246 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3247 d0ecd2aa bellard
            /* ROM/RAM case */
3248 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3249 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3250 d0ecd2aa bellard
        }
3251 d0ecd2aa bellard
        len -= l;
3252 d0ecd2aa bellard
        buf += l;
3253 d0ecd2aa bellard
        addr += l;
3254 d0ecd2aa bellard
    }
3255 d0ecd2aa bellard
}
3256 d0ecd2aa bellard
3257 6d16c2f8 aliguori
typedef struct {
3258 6d16c2f8 aliguori
    void *buffer;
3259 6d16c2f8 aliguori
    target_phys_addr_t addr;
3260 6d16c2f8 aliguori
    target_phys_addr_t len;
3261 6d16c2f8 aliguori
} BounceBuffer;
3262 6d16c2f8 aliguori
3263 6d16c2f8 aliguori
static BounceBuffer bounce;
3264 6d16c2f8 aliguori
3265 ba223c29 aliguori
typedef struct MapClient {
3266 ba223c29 aliguori
    void *opaque;
3267 ba223c29 aliguori
    void (*callback)(void *opaque);
3268 ba223c29 aliguori
    LIST_ENTRY(MapClient) link;
3269 ba223c29 aliguori
} MapClient;
3270 ba223c29 aliguori
3271 ba223c29 aliguori
static LIST_HEAD(map_client_list, MapClient) map_client_list
3272 ba223c29 aliguori
    = LIST_HEAD_INITIALIZER(map_client_list);
3273 ba223c29 aliguori
3274 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3275 ba223c29 aliguori
{
3276 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3277 ba223c29 aliguori
3278 ba223c29 aliguori
    client->opaque = opaque;
3279 ba223c29 aliguori
    client->callback = callback;
3280 ba223c29 aliguori
    LIST_INSERT_HEAD(&map_client_list, client, link);
3281 ba223c29 aliguori
    return client;
3282 ba223c29 aliguori
}
3283 ba223c29 aliguori
3284 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3285 ba223c29 aliguori
{
3286 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3287 ba223c29 aliguori
3288 ba223c29 aliguori
    LIST_REMOVE(client, link);
3289 ba223c29 aliguori
}
3290 ba223c29 aliguori
3291 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3292 ba223c29 aliguori
{
3293 ba223c29 aliguori
    MapClient *client;
3294 ba223c29 aliguori
3295 ba223c29 aliguori
    while (!LIST_EMPTY(&map_client_list)) {
3296 ba223c29 aliguori
        client = LIST_FIRST(&map_client_list);
3297 ba223c29 aliguori
        client->callback(client->opaque);
3298 ba223c29 aliguori
        LIST_REMOVE(client, link);
3299 ba223c29 aliguori
    }
3300 ba223c29 aliguori
}
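/* Illustrative usage sketch (hypothetical MyDMAState/my_dma_* names): a
 * device model whose cpu_physical_memory_map() attempt failed can park itself
 * on the map-client list.  cpu_notify_map_clients() runs the callback once
 * the bounce buffer is released and then removes the registration itself, so
 * the callback should not call cpu_unregister_map_client() as well. */
typedef struct MyDMAState {
    target_phys_addr_t addr;
    target_phys_addr_t len;
    void *map_client;
} MyDMAState;

static void my_dma_retry(void *opaque)
{
    MyDMAState *s = opaque;

    s->map_client = NULL;
    /* ...retry cpu_physical_memory_map() for s->addr/s->len from here... */
}

static void my_dma_defer(MyDMAState *s)
{
    s->map_client = cpu_register_map_client(s, my_dma_retry);
}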
3301 ba223c29 aliguori
3302 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3303 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3304 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3305 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3306 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
3307 ba223c29 aliguori
 * likely to succeed.
3308 6d16c2f8 aliguori
 */
3309 6d16c2f8 aliguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
3310 6d16c2f8 aliguori
                              target_phys_addr_t *plen,
3311 6d16c2f8 aliguori
                              int is_write)
3312 6d16c2f8 aliguori
{
3313 6d16c2f8 aliguori
    target_phys_addr_t len = *plen;
3314 6d16c2f8 aliguori
    target_phys_addr_t done = 0;
3315 6d16c2f8 aliguori
    int l;
3316 6d16c2f8 aliguori
    uint8_t *ret = NULL;
3317 6d16c2f8 aliguori
    uint8_t *ptr;
3318 6d16c2f8 aliguori
    target_phys_addr_t page;
3319 6d16c2f8 aliguori
    unsigned long pd;
3320 6d16c2f8 aliguori
    PhysPageDesc *p;
3321 6d16c2f8 aliguori
    unsigned long addr1;
3322 6d16c2f8 aliguori
3323 6d16c2f8 aliguori
    while (len > 0) {
3324 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
3325 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
3326 6d16c2f8 aliguori
        if (l > len)
3327 6d16c2f8 aliguori
            l = len;
3328 6d16c2f8 aliguori
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3329 6d16c2f8 aliguori
        if (!p) {
3330 6d16c2f8 aliguori
            pd = IO_MEM_UNASSIGNED;
3331 6d16c2f8 aliguori
        } else {
3332 6d16c2f8 aliguori
            pd = p->phys_offset;
3333 6d16c2f8 aliguori
        }
3334 6d16c2f8 aliguori
3335 6d16c2f8 aliguori
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3336 6d16c2f8 aliguori
            if (done || bounce.buffer) {
3337 6d16c2f8 aliguori
                break;
3338 6d16c2f8 aliguori
            }
3339 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3340 6d16c2f8 aliguori
            bounce.addr = addr;
3341 6d16c2f8 aliguori
            bounce.len = l;
3342 6d16c2f8 aliguori
            if (!is_write) {
3343 6d16c2f8 aliguori
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3344 6d16c2f8 aliguori
            }
3345 6d16c2f8 aliguori
            ptr = bounce.buffer;
3346 6d16c2f8 aliguori
        } else {
3347 6d16c2f8 aliguori
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3348 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3349 6d16c2f8 aliguori
        }
3350 6d16c2f8 aliguori
        if (!done) {
3351 6d16c2f8 aliguori
            ret = ptr;
3352 6d16c2f8 aliguori
        } else if (ret + done != ptr) {
3353 6d16c2f8 aliguori
            break;
3354 6d16c2f8 aliguori
        }
3355 6d16c2f8 aliguori
3356 6d16c2f8 aliguori
        len -= l;
3357 6d16c2f8 aliguori
        addr += l;
3358 6d16c2f8 aliguori
        done += l;
3359 6d16c2f8 aliguori
    }
3360 6d16c2f8 aliguori
    *plen = done;
3361 6d16c2f8 aliguori
    return ret;
3362 6d16c2f8 aliguori
}
3363 6d16c2f8 aliguori
3364 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3365 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3366 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
3367 6d16c2f8 aliguori
 */
3368 6d16c2f8 aliguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3369 6d16c2f8 aliguori
                               int is_write, target_phys_addr_t access_len)
3370 6d16c2f8 aliguori
{
3371 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
3372 6d16c2f8 aliguori
        if (is_write) {
3373 5579c7f3 pbrook
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3374 6d16c2f8 aliguori
            while (access_len) {
3375 6d16c2f8 aliguori
                unsigned l;
3376 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
3377 6d16c2f8 aliguori
                if (l > access_len)
3378 6d16c2f8 aliguori
                    l = access_len;
3379 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
3380 6d16c2f8 aliguori
                    /* invalidate code */
3381 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3382 6d16c2f8 aliguori
                    /* set dirty bit */
3383 6d16c2f8 aliguori
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3384 6d16c2f8 aliguori
                        (0xff & ~CODE_DIRTY_FLAG);
3385 6d16c2f8 aliguori
                }
3386 6d16c2f8 aliguori
                addr1 += l;
3387 6d16c2f8 aliguori
                access_len -= l;
3388 6d16c2f8 aliguori
            }
3389 6d16c2f8 aliguori
        }
3390 6d16c2f8 aliguori
        return;
3391 6d16c2f8 aliguori
    }
3392 6d16c2f8 aliguori
    if (is_write) {
3393 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3394 6d16c2f8 aliguori
    }
3395 6d16c2f8 aliguori
    qemu_free(bounce.buffer);
3396 6d16c2f8 aliguori
    bounce.buffer = NULL;
3397 ba223c29 aliguori
    cpu_notify_map_clients();
3398 6d16c2f8 aliguori
}
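/* Illustrative usage sketch (hypothetical dma_fill() helper): the canonical
 * map/access/unmap pattern.  The caller asks for len bytes, may get fewer
 * back through *plen (or NULL while the single bounce buffer is in use),
 * touches the returned host pointer directly, and unmaps with the length it
 * actually accessed so that writes get dirty tracking and code invalidation. */
static int dma_fill(target_phys_addr_t addr, target_phys_addr_t len,
                    uint8_t pattern)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(addr, &plen, 1);

        if (!host) {
            /* Mapping resources exhausted: the caller would register a map
               client (see cpu_register_map_client() above) and retry later. */
            return -1;
        }
        memset(host, pattern, (size_t)plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
    return 0;
}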
3399 d0ecd2aa bellard
3400 8df1cd07 bellard
/* warning: addr must be aligned */
3401 8df1cd07 bellard
uint32_t ldl_phys(target_phys_addr_t addr)
3402 8df1cd07 bellard
{
3403 8df1cd07 bellard
    int io_index;
3404 8df1cd07 bellard
    uint8_t *ptr;
3405 8df1cd07 bellard
    uint32_t val;
3406 8df1cd07 bellard
    unsigned long pd;
3407 8df1cd07 bellard
    PhysPageDesc *p;
3408 8df1cd07 bellard
3409 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3410 8df1cd07 bellard
    if (!p) {
3411 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3412 8df1cd07 bellard
    } else {
3413 8df1cd07 bellard
        pd = p->phys_offset;
3414 8df1cd07 bellard
    }
3415 3b46e624 ths
3416 5fafdf24 ths
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3417 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3418 8df1cd07 bellard
        /* I/O case */
3419 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3420 8da3ff18 pbrook
        if (p)
3421 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3422 8df1cd07 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3423 8df1cd07 bellard
    } else {
3424 8df1cd07 bellard
        /* RAM case */
3425 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3426 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
3427 8df1cd07 bellard
        val = ldl_p(ptr);
3428 8df1cd07 bellard
    }
3429 8df1cd07 bellard
    return val;
3430 8df1cd07 bellard
}
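/* Illustrative usage sketch (hypothetical register layout): ldl_phys()
 * dispatches to the io_mem_read table for MMIO pages and to a direct host
 * load for RAM pages, so a caller can read a 32-bit word without knowing
 * what backs the address, provided it is 32-bit aligned as the warning
 * above says. */
static uint32_t read_status_register(target_phys_addr_t regs_base)
{
    return ldl_phys(regs_base + 4);   /* aligned offset into the register block */
}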
3431 8df1cd07 bellard
3432 84b7b8e7 bellard
/* warning: addr must be aligned */
3433 84b7b8e7 bellard
uint64_t ldq_phys(target_phys_addr_t addr)
3434 84b7b8e7 bellard
{
3435 84b7b8e7 bellard
    int io_index;
3436 84b7b8e7 bellard
    uint8_t *ptr;
3437 84b7b8e7 bellard
    uint64_t val;
3438 84b7b8e7 bellard
    unsigned long pd;
3439 84b7b8e7 bellard
    PhysPageDesc *p;
3440 84b7b8e7 bellard
3441 84b7b8e7 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3442 84b7b8e7 bellard
    if (!p) {
3443 84b7b8e7 bellard
        pd = IO_MEM_UNASSIGNED;
3444 84b7b8e7 bellard
    } else {
3445 84b7b8e7 bellard
        pd = p->phys_offset;
3446 84b7b8e7 bellard
    }
3447 3b46e624 ths
3448 2a4188a3 bellard
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3449 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3450 84b7b8e7 bellard
        /* I/O case */
3451 84b7b8e7 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3452 8da3ff18 pbrook
        if (p)
3453 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3454 84b7b8e7 bellard
#ifdef TARGET_WORDS_BIGENDIAN
3455 84b7b8e7 bellard
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3456 84b7b8e7 bellard
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3457 84b7b8e7 bellard
#else
3458 84b7b8e7 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3459 84b7b8e7 bellard
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3460 84b7b8e7 bellard
#endif
3461 84b7b8e7 bellard
    } else {
3462 84b7b8e7 bellard
        /* RAM case */
3463 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3464 84b7b8e7 bellard
            (addr & ~TARGET_PAGE_MASK);
3465 84b7b8e7 bellard
        val = ldq_p(ptr);
3466 84b7b8e7 bellard
    }
3467 84b7b8e7 bellard
    return val;
3468 84b7b8e7 bellard
}
3469 84b7b8e7 bellard
3470 aab33094 bellard
/* XXX: optimize */
3471 aab33094 bellard
uint32_t ldub_phys(target_phys_addr_t addr)
3472 aab33094 bellard
{
3473 aab33094 bellard
    uint8_t val;
3474 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3475 aab33094 bellard
    return val;
3476 aab33094 bellard
}
3477 aab33094 bellard
3478 aab33094 bellard
/* XXX: optimize */
3479 aab33094 bellard
uint32_t lduw_phys(target_phys_addr_t addr)
3480 aab33094 bellard
{
3481 aab33094 bellard
    uint16_t val;
3482 aab33094 bellard
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3483 aab33094 bellard
    return tswap16(val);
3484 aab33094 bellard
}
3485 aab33094 bellard
3486 8df1cd07 bellard
/* warning: addr must be aligned. The RAM page is not marked as dirty
3487 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
3488 8df1cd07 bellard
   bits are used to track modified PTEs */
3489 8df1cd07 bellard
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3490 8df1cd07 bellard
{
3491 8df1cd07 bellard
    int io_index;
3492 8df1cd07 bellard
    uint8_t *ptr;
3493 8df1cd07 bellard
    unsigned long pd;
3494 8df1cd07 bellard
    PhysPageDesc *p;
3495 8df1cd07 bellard
3496 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3497 8df1cd07 bellard
    if (!p) {
3498 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3499 8df1cd07 bellard
    } else {
3500 8df1cd07 bellard
        pd = p->phys_offset;
3501 8df1cd07 bellard
    }
3502 3b46e624 ths
3503 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3504 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3505 8da3ff18 pbrook
        if (p)
3506 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3507 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3508 8df1cd07 bellard
    } else {
3509 74576198 aliguori
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3510 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3511 8df1cd07 bellard
        stl_p(ptr, val);
3512 74576198 aliguori
3513 74576198 aliguori
        if (unlikely(in_migration)) {
3514 74576198 aliguori
            if (!cpu_physical_memory_is_dirty(addr1)) {
3515 74576198 aliguori
                /* invalidate code */
3516 74576198 aliguori
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3517 74576198 aliguori
                /* set dirty bit */
3518 74576198 aliguori
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3519 74576198 aliguori
                    (0xff & ~CODE_DIRTY_FLAG);
3520 74576198 aliguori
            }
3521 74576198 aliguori
        }
3522 8df1cd07 bellard
    }
3523 8df1cd07 bellard
}
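/* Illustrative usage sketch (hypothetical helper): target MMU emulation is
 * the typical caller, e.g. when setting accessed/dirty flags in a guest page
 * table entry.  Going through stl_phys_notdirty() leaves the page's dirty
 * bits and translated code untouched, so dirty tracking still reflects only
 * the guest's own modifications of its page tables. */
static void pte_set_flags(target_phys_addr_t pte_addr, uint32_t flags)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | flags);
}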
3524 8df1cd07 bellard
3525 bc98a7ef j_mayer
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3526 bc98a7ef j_mayer
{
3527 bc98a7ef j_mayer
    int io_index;
3528 bc98a7ef j_mayer
    uint8_t *ptr;
3529 bc98a7ef j_mayer
    unsigned long pd;
3530 bc98a7ef j_mayer
    PhysPageDesc *p;
3531 bc98a7ef j_mayer
3532 bc98a7ef j_mayer
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3533 bc98a7ef j_mayer
    if (!p) {
3534 bc98a7ef j_mayer
        pd = IO_MEM_UNASSIGNED;
3535 bc98a7ef j_mayer
    } else {
3536 bc98a7ef j_mayer
        pd = p->phys_offset;
3537 bc98a7ef j_mayer
    }
3538 3b46e624 ths
3539 bc98a7ef j_mayer
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3540 bc98a7ef j_mayer
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3541 8da3ff18 pbrook
        if (p)
3542 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3543 bc98a7ef j_mayer
#ifdef TARGET_WORDS_BIGENDIAN
3544 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3545 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3546 bc98a7ef j_mayer
#else
3547 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3548 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3549 bc98a7ef j_mayer
#endif
3550 bc98a7ef j_mayer
    } else {
3551 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3552 bc98a7ef j_mayer
            (addr & ~TARGET_PAGE_MASK);
3553 bc98a7ef j_mayer
        stq_p(ptr, val);
3554 bc98a7ef j_mayer
    }
3555 bc98a7ef j_mayer
}
3556 bc98a7ef j_mayer
3557 8df1cd07 bellard
/* warning: addr must be aligned */
3558 8df1cd07 bellard
void stl_phys(target_phys_addr_t addr, uint32_t val)
3559 8df1cd07 bellard
{
3560 8df1cd07 bellard
    int io_index;
3561 8df1cd07 bellard
    uint8_t *ptr;
3562 8df1cd07 bellard
    unsigned long pd;
3563 8df1cd07 bellard
    PhysPageDesc *p;
3564 8df1cd07 bellard
3565 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3566 8df1cd07 bellard
    if (!p) {
3567 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3568 8df1cd07 bellard
    } else {
3569 8df1cd07 bellard
        pd = p->phys_offset;
3570 8df1cd07 bellard
    }
3571 3b46e624 ths
3572 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3573 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3574 8da3ff18 pbrook
        if (p)
3575 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3576 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3577 8df1cd07 bellard
    } else {
3578 8df1cd07 bellard
        unsigned long addr1;
3579 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3580 8df1cd07 bellard
        /* RAM case */
3581 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3582 8df1cd07 bellard
        stl_p(ptr, val);
3583 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
3584 3a7d929e bellard
            /* invalidate code */
3585 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3586 3a7d929e bellard
            /* set dirty bit */
3587 f23db169 bellard
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3588 f23db169 bellard
                (0xff & ~CODE_DIRTY_FLAG);
3589 3a7d929e bellard
        }
3590 8df1cd07 bellard
    }
3591 8df1cd07 bellard
}
3592 8df1cd07 bellard
3593 aab33094 bellard
/* XXX: optimize */
3594 aab33094 bellard
void stb_phys(target_phys_addr_t addr, uint32_t val)
3595 aab33094 bellard
{
3596 aab33094 bellard
    uint8_t v = val;
3597 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3598 aab33094 bellard
}
3599 aab33094 bellard
3600 aab33094 bellard
/* XXX: optimize */
3601 aab33094 bellard
void stw_phys(target_phys_addr_t addr, uint32_t val)
3602 aab33094 bellard
{
3603 aab33094 bellard
    uint16_t v = tswap16(val);
3604 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3605 aab33094 bellard
}
3606 aab33094 bellard
3607 aab33094 bellard
/* XXX: optimize */
3608 aab33094 bellard
void stq_phys(target_phys_addr_t addr, uint64_t val)
3609 aab33094 bellard
{
3610 aab33094 bellard
    val = tswap64(val);
3611 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3612 aab33094 bellard
}
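/* Illustrative usage sketch (hypothetical descriptor layout): device models
 * usually combine these helpers to walk small structures in guest memory.
 * The ld*_phys() variants return host-order values and the st*_phys()
 * variants store in guest byte order, so no explicit swapping is needed
 * here.  The struct is only a host-side copy; the guest layout is read
 * field by field at fixed offsets. */
typedef struct ExampleDesc {
    uint64_t buf_addr;   /* guest bytes 0..7   */
    uint32_t buf_len;    /* guest bytes 8..11  */
    uint16_t flags;      /* guest bytes 12..13 */
} ExampleDesc;

static void example_desc_read(target_phys_addr_t base, ExampleDesc *d)
{
    d->buf_addr = ldq_phys(base);
    d->buf_len  = ldl_phys(base + 8);
    d->flags    = lduw_phys(base + 12);
}

static void example_desc_set_flags(target_phys_addr_t base, uint16_t flags)
{
    stw_phys(base + 12, flags);
}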
3613 aab33094 bellard
3614 13eb76e0 bellard
#endif
3615 13eb76e0 bellard
3616 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
3617 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3618 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
3619 13eb76e0 bellard
{
3620 13eb76e0 bellard
    int l;
3621 9b3c35e0 j_mayer
    target_phys_addr_t phys_addr;
3622 9b3c35e0 j_mayer
    target_ulong page;
3623 13eb76e0 bellard
3624 13eb76e0 bellard
    while (len > 0) {
3625 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3626 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
3627 13eb76e0 bellard
        /* if no physical page mapped, return an error */
3628 13eb76e0 bellard
        if (phys_addr == -1)
3629 13eb76e0 bellard
            return -1;
3630 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3631 13eb76e0 bellard
        if (l > len)
3632 13eb76e0 bellard
            l = len;
3633 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
3634 5e2972fd aliguori
#if !defined(CONFIG_USER_ONLY)
3635 5e2972fd aliguori
        if (is_write)
3636 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
3637 5e2972fd aliguori
        else
3638 5e2972fd aliguori
#endif
3639 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3640 13eb76e0 bellard
        len -= l;
3641 13eb76e0 bellard
        buf += l;
3642 13eb76e0 bellard
        addr += l;
3643 13eb76e0 bellard
    }
3644 13eb76e0 bellard
    return 0;
3645 13eb76e0 bellard
}
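/* Illustrative usage sketch (hypothetical helper): cpu_memory_rw_debug() is
 * the path debugger-style accessors such as the gdb stub go through, since
 * it translates each page with cpu_get_phys_page_debug() and can also write
 * to ROM.  Reading one 32-bit word from a guest virtual address: */
static int debug_read_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;          /* no physical page mapped at vaddr */
    *out = ldl_p(buf);      /* interpret the bytes in guest byte order */
    return 0;
}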
3646 13eb76e0 bellard
3647 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
3648 2e70f6ef pbrook
   must be at the end of the TB */
3649 2e70f6ef pbrook
void cpu_io_recompile(CPUState *env, void *retaddr)
3650 2e70f6ef pbrook
{
3651 2e70f6ef pbrook
    TranslationBlock *tb;
3652 2e70f6ef pbrook
    uint32_t n, cflags;
3653 2e70f6ef pbrook
    target_ulong pc, cs_base;
3654 2e70f6ef pbrook
    uint64_t flags;
3655 2e70f6ef pbrook
3656 2e70f6ef pbrook
    tb = tb_find_pc((unsigned long)retaddr);
3657 2e70f6ef pbrook
    if (!tb) {
3658 2e70f6ef pbrook
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
3659 2e70f6ef pbrook
                  retaddr);
3660 2e70f6ef pbrook
    }
3661 2e70f6ef pbrook
    n = env->icount_decr.u16.low + tb->icount;
3662 2e70f6ef pbrook
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3663 2e70f6ef pbrook
    /* Calculate how many instructions had been executed before the fault
3664 bf20dc07 ths
       occurred.  */
3665 2e70f6ef pbrook
    n = n - env->icount_decr.u16.low;
3666 2e70f6ef pbrook
    /* Generate a new TB ending on the I/O insn.  */
3667 2e70f6ef pbrook
    n++;
3668 2e70f6ef pbrook
    /* On MIPS and SH, delay slot instructions can only be restarted if
3669 2e70f6ef pbrook
       they were already the first instruction in the TB.  If this is not
3670 bf20dc07 ths
       the first instruction in a TB then re-execute the preceding
3671 2e70f6ef pbrook
       branch.  */
3672 2e70f6ef pbrook
#if defined(TARGET_MIPS)
3673 2e70f6ef pbrook
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3674 2e70f6ef pbrook
        env->active_tc.PC -= 4;
3675 2e70f6ef pbrook
        env->icount_decr.u16.low++;
3676 2e70f6ef pbrook
        env->hflags &= ~MIPS_HFLAG_BMASK;
3677 2e70f6ef pbrook
    }
3678 2e70f6ef pbrook
#elif defined(TARGET_SH4)
3679 2e70f6ef pbrook
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3680 2e70f6ef pbrook
            && n > 1) {
3681 2e70f6ef pbrook
        env->pc -= 2;
3682 2e70f6ef pbrook
        env->icount_decr.u16.low++;
3683 2e70f6ef pbrook
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3684 2e70f6ef pbrook
    }
3685 2e70f6ef pbrook
#endif
3686 2e70f6ef pbrook
    /* This should never happen.  */
3687 2e70f6ef pbrook
    if (n > CF_COUNT_MASK)
3688 2e70f6ef pbrook
        cpu_abort(env, "TB too big during recompile");
3689 2e70f6ef pbrook
3690 2e70f6ef pbrook
    cflags = n | CF_LAST_IO;
3691 2e70f6ef pbrook
    pc = tb->pc;
3692 2e70f6ef pbrook
    cs_base = tb->cs_base;
3693 2e70f6ef pbrook
    flags = tb->flags;
3694 2e70f6ef pbrook
    tb_phys_invalidate(tb, -1);
3695 2e70f6ef pbrook
    /* FIXME: In theory this could raise an exception.  In practice
3696 2e70f6ef pbrook
       we have already translated the block once so it's probably ok.  */
3697 2e70f6ef pbrook
    tb_gen_code(env, pc, cs_base, flags, cflags);
3698 bf20dc07 ths
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3699 2e70f6ef pbrook
       the first in the TB) then we end up generating a whole new TB and
3700 2e70f6ef pbrook
       repeating the fault, which is horribly inefficient.
3701 2e70f6ef pbrook
       Better would be to execute just this insn uncached, or generate a
3702 2e70f6ef pbrook
       second new TB.  */
3703 2e70f6ef pbrook
    cpu_resume_from_signal(env, NULL);
3704 2e70f6ef pbrook
}
3705 2e70f6ef pbrook
3706 e3db7226 bellard
void dump_exec_info(FILE *f,
3707 e3db7226 bellard
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3708 e3db7226 bellard
{
3709 e3db7226 bellard
    int i, target_code_size, max_target_code_size;
3710 e3db7226 bellard
    int direct_jmp_count, direct_jmp2_count, cross_page;
3711 e3db7226 bellard
    TranslationBlock *tb;
3712 3b46e624 ths
3713 e3db7226 bellard
    target_code_size = 0;
3714 e3db7226 bellard
    max_target_code_size = 0;
3715 e3db7226 bellard
    cross_page = 0;
3716 e3db7226 bellard
    direct_jmp_count = 0;
3717 e3db7226 bellard
    direct_jmp2_count = 0;
3718 e3db7226 bellard
    for(i = 0; i < nb_tbs; i++) {
3719 e3db7226 bellard
        tb = &tbs[i];
3720 e3db7226 bellard
        target_code_size += tb->size;
3721 e3db7226 bellard
        if (tb->size > max_target_code_size)
3722 e3db7226 bellard
            max_target_code_size = tb->size;
3723 e3db7226 bellard
        if (tb->page_addr[1] != -1)
3724 e3db7226 bellard
            cross_page++;
3725 e3db7226 bellard
        if (tb->tb_next_offset[0] != 0xffff) {
3726 e3db7226 bellard
            direct_jmp_count++;
3727 e3db7226 bellard
            if (tb->tb_next_offset[1] != 0xffff) {
3728 e3db7226 bellard
                direct_jmp2_count++;
3729 e3db7226 bellard
            }
3730 e3db7226 bellard
        }
3731 e3db7226 bellard
    }
3732 e3db7226 bellard
    /* XXX: avoid using doubles? */
3733 57fec1fe bellard
    cpu_fprintf(f, "Translation buffer state:\n");
3734 26a5f13b bellard
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
3735 26a5f13b bellard
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3736 26a5f13b bellard
    cpu_fprintf(f, "TB count            %d/%d\n", 
3737 26a5f13b bellard
                nb_tbs, code_gen_max_blocks);
3738 5fafdf24 ths
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3739 e3db7226 bellard
                nb_tbs ? target_code_size / nb_tbs : 0,
3740 e3db7226 bellard
                max_target_code_size);
3741 5fafdf24 ths
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3742 e3db7226 bellard
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3743 e3db7226 bellard
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3744 5fafdf24 ths
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3745 5fafdf24 ths
            cross_page,
3746 e3db7226 bellard
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3747 e3db7226 bellard
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3748 5fafdf24 ths
                direct_jmp_count,
3749 e3db7226 bellard
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3750 e3db7226 bellard
                direct_jmp2_count,
3751 e3db7226 bellard
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3752 57fec1fe bellard
    cpu_fprintf(f, "\nStatistics:\n");
3753 e3db7226 bellard
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3754 e3db7226 bellard
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3755 e3db7226 bellard
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3756 b67d9a52 bellard
    tcg_dump_info(f, cpu_fprintf);
3757 e3db7226 bellard
}
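/* Illustrative usage sketch (hypothetical wrapper name): dump_exec_info()
 * takes any fprintf-compatible callback, so the translation statistics can
 * be sent to a stdio stream just as well as to the monitor, whose
 * "info jit" command is the usual caller. */
static void dump_exec_info_stderr(void)
{
    dump_exec_info(stderr, fprintf);
}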
3758 e3db7226 bellard
3759 5fafdf24 ths
#if !defined(CONFIG_USER_ONLY)
3760 61382a50 bellard
3761 61382a50 bellard
#define MMUSUFFIX _cmmu
3762 61382a50 bellard
#define GETPC() NULL
3763 61382a50 bellard
#define env cpu_single_env
3764 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
3765 61382a50 bellard
3766 61382a50 bellard
#define SHIFT 0
3767 61382a50 bellard
#include "softmmu_template.h"
3768 61382a50 bellard
3769 61382a50 bellard
#define SHIFT 1
3770 61382a50 bellard
#include "softmmu_template.h"
3771 61382a50 bellard
3772 61382a50 bellard
#define SHIFT 2
3773 61382a50 bellard
#include "softmmu_template.h"
3774 61382a50 bellard
3775 61382a50 bellard
#define SHIFT 3
3776 61382a50 bellard
#include "softmmu_template.h"
3777 61382a50 bellard
3778 61382a50 bellard
#undef env
3779 61382a50 bellard
3780 61382a50 bellard
#endif