/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

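/* Make a host memory range executable: VirtualProtect() on Win32,
   otherwise mprotect() after rounding the range out to host page
   boundaries. */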
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

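/* Compute the host page size, derive the qemu_host_page_* globals from
   it and allocate/clear the first level of l1_phys_map.  In user mode,
   address ranges already mapped by the host (read from
   /proc/self/maps) are flagged PAGE_RESERVED. */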
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

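/* The PageDesc table is a two-level table indexed by target page
   number: page_l1_map() returns the L1 slot for an index,
   page_find_alloc() allocates the L2 array on demand, and page_find()
   is the non-allocating variant. */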
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

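/* Look up the PhysPageDesc for a physical page index, optionally
   allocating the intermediate and leaf tables.  Freshly allocated
   entries start out as IO_MEM_UNASSIGNED. */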
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

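/* Allocate the buffer that holds the translated code, either the
   static buffer or an mmap()ed region placed to satisfy the host's
   branch range constraints, then size the TranslationBlock array
   from it. */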
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

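/* Common CPU save/load hooks: cpu_synchronize_state() is called before
   save and load, and post_load clears the obsolete 0x01
   (CPU_INTERRUPT_EXIT) bit and flushes the TLB. */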
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static void cpu_common_pre_save(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = cpu_common_pre_save,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

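/* Return the CPUState with the given cpu_index, or NULL if none
   matches. */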
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

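/* Register a new CPU: append it to the global CPU list, assign it the
   next cpu_index and hook up its vmstate/savevm handlers. */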
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

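/* Drop the SMC code bitmap of a page and reset its write counter. */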
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

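/* Helpers to unlink a TB from a page's TB list and from the circular
   list of TBs that jump to it.  The low two bits of the list pointers
   encode which of the TB's two pages (or jump slots) the link belongs
   to. */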
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

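/* Remove a TB from the physical hash table, the page lists and every
   per-CPU tb_jmp_cache, then unchain all jumps that target it. */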
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

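/* Set 'len' bits starting at bit 'start' in the bitmap 'tab'. */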
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

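/* Build the bitmap of bytes of this page that are covered by
   translated code, by walking the page's TB list. */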
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

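/* Translate a new TB for 'pc'.  If the TB pool or code buffer is
   exhausted, everything is flushed and the allocation retried; the
   second tb_alloc() cannot fail. */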
879 2e70f6ef pbrook
TranslationBlock *tb_gen_code(CPUState *env,
880 2e70f6ef pbrook
                              target_ulong pc, target_ulong cs_base,
881 2e70f6ef pbrook
                              int flags, int cflags)
882 d720b93d bellard
{
883 d720b93d bellard
    TranslationBlock *tb;
884 d720b93d bellard
    uint8_t *tc_ptr;
885 d720b93d bellard
    target_ulong phys_pc, phys_page2, virt_page2;
886 d720b93d bellard
    int code_gen_size;
887 d720b93d bellard
888 c27004ec bellard
    phys_pc = get_phys_addr_code(env, pc);
889 c27004ec bellard
    tb = tb_alloc(pc);
890 d720b93d bellard
    if (!tb) {
891 d720b93d bellard
        /* flush must be done */
892 d720b93d bellard
        tb_flush(env);
893 d720b93d bellard
        /* cannot fail at this point */
894 c27004ec bellard
        tb = tb_alloc(pc);
895 2e70f6ef pbrook
        /* Don't forget to invalidate previous TB info.  */
896 2e70f6ef pbrook
        tb_invalidated_flag = 1;
897 d720b93d bellard
    }
898 d720b93d bellard
    tc_ptr = code_gen_ptr;
899 d720b93d bellard
    tb->tc_ptr = tc_ptr;
900 d720b93d bellard
    tb->cs_base = cs_base;
901 d720b93d bellard
    tb->flags = flags;
902 d720b93d bellard
    tb->cflags = cflags;
903 d07bde88 blueswir1
    cpu_gen_code(env, tb, &code_gen_size);
904 d720b93d bellard
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
905 3b46e624 ths
906 d720b93d bellard
    /* check next page if needed */
907 c27004ec bellard
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
908 d720b93d bellard
    phys_page2 = -1;
909 c27004ec bellard
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
910 d720b93d bellard
        phys_page2 = get_phys_addr_code(env, virt_page2);
911 d720b93d bellard
    }
912 d720b93d bellard
    tb_link_phys(tb, phys_pc, phys_page2);
913 2e70f6ef pbrook
    return tb;
914 d720b93d bellard
}
915 3b46e624 ths
916 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
917 9fa3e853 bellard
   starting in range [start;end[. NOTE: start and end must refer to
918 d720b93d bellard
   the same physical page. 'is_cpu_write_access' should be true if called
919 d720b93d bellard
   from a real cpu write access: the virtual CPU will exit the current
920 d720b93d bellard
   TB if code is modified inside this TB. */
921 c227f099 Anthony Liguori
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
922 d720b93d bellard
                                   int is_cpu_write_access)
923 d720b93d bellard
{
924 6b917547 aliguori
    TranslationBlock *tb, *tb_next, *saved_tb;
925 d720b93d bellard
    CPUState *env = cpu_single_env;
926 9fa3e853 bellard
    target_ulong tb_start, tb_end;
927 6b917547 aliguori
    PageDesc *p;
928 6b917547 aliguori
    int n;
929 6b917547 aliguori
#ifdef TARGET_HAS_PRECISE_SMC
930 6b917547 aliguori
    int current_tb_not_found = is_cpu_write_access;
931 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
932 6b917547 aliguori
    int current_tb_modified = 0;
933 6b917547 aliguori
    target_ulong current_pc = 0;
934 6b917547 aliguori
    target_ulong current_cs_base = 0;
935 6b917547 aliguori
    int current_flags = 0;
936 6b917547 aliguori
#endif /* TARGET_HAS_PRECISE_SMC */
937 9fa3e853 bellard
938 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
939 5fafdf24 ths
    if (!p)
940 9fa3e853 bellard
        return;
941 5fafdf24 ths
    if (!p->code_bitmap &&
942 d720b93d bellard
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
943 d720b93d bellard
        is_cpu_write_access) {
944 9fa3e853 bellard
        /* build code bitmap */
945 9fa3e853 bellard
        build_page_bitmap(p);
946 9fa3e853 bellard
    }
947 9fa3e853 bellard
948 9fa3e853 bellard
    /* we remove all the TBs in the range [start, end[ */
949 9fa3e853 bellard
    /* XXX: see if in some cases it could be faster to invalidate all the code */
950 9fa3e853 bellard
    tb = p->first_tb;
951 9fa3e853 bellard
    while (tb != NULL) {
952 9fa3e853 bellard
        n = (long)tb & 3;
953 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
954 9fa3e853 bellard
        tb_next = tb->page_next[n];
955 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
956 9fa3e853 bellard
        if (n == 0) {
957 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
958 9fa3e853 bellard
               it is not a problem */
959 9fa3e853 bellard
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
960 9fa3e853 bellard
            tb_end = tb_start + tb->size;
961 9fa3e853 bellard
        } else {
962 9fa3e853 bellard
            tb_start = tb->page_addr[1];
963 9fa3e853 bellard
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
964 9fa3e853 bellard
        }
965 9fa3e853 bellard
        if (!(tb_end <= start || tb_start >= end)) {
966 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
967 d720b93d bellard
            if (current_tb_not_found) {
968 d720b93d bellard
                current_tb_not_found = 0;
969 d720b93d bellard
                current_tb = NULL;
970 2e70f6ef pbrook
                if (env->mem_io_pc) {
971 d720b93d bellard
                    /* now we have a real cpu fault */
972 2e70f6ef pbrook
                    current_tb = tb_find_pc(env->mem_io_pc);
973 d720b93d bellard
                }
974 d720b93d bellard
            }
975 d720b93d bellard
            if (current_tb == tb &&
976 2e70f6ef pbrook
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
977 d720b93d bellard
                /* If we are modifying the current TB, we must stop
978 d720b93d bellard
                its execution. We could be more precise by checking
979 d720b93d bellard
                that the modification is after the current PC, but it
980 d720b93d bellard
                would require a specialized function to partially
981 d720b93d bellard
                restore the CPU state */
982 3b46e624 ths
983 d720b93d bellard
                current_tb_modified = 1;
984 5fafdf24 ths
                cpu_restore_state(current_tb, env,
985 2e70f6ef pbrook
                                  env->mem_io_pc, NULL);
986 6b917547 aliguori
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
987 6b917547 aliguori
                                     &current_flags);
988 d720b93d bellard
            }
989 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
990 6f5a9f7e bellard
            /* we need to do that to handle the case where a signal
991 6f5a9f7e bellard
               occurs while doing tb_phys_invalidate() */
992 6f5a9f7e bellard
            saved_tb = NULL;
993 6f5a9f7e bellard
            if (env) {
994 6f5a9f7e bellard
                saved_tb = env->current_tb;
995 6f5a9f7e bellard
                env->current_tb = NULL;
996 6f5a9f7e bellard
            }
997 9fa3e853 bellard
            tb_phys_invalidate(tb, -1);
998 6f5a9f7e bellard
            if (env) {
999 6f5a9f7e bellard
                env->current_tb = saved_tb;
1000 6f5a9f7e bellard
                if (env->interrupt_request && env->current_tb)
1001 6f5a9f7e bellard
                    cpu_interrupt(env, env->interrupt_request);
1002 6f5a9f7e bellard
            }
1003 9fa3e853 bellard
        }
1004 9fa3e853 bellard
        tb = tb_next;
1005 9fa3e853 bellard
    }
1006 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
1007 9fa3e853 bellard
    /* if no code remaining, no need to continue to use slow writes */
1008 9fa3e853 bellard
    if (!p->first_tb) {
1009 9fa3e853 bellard
        invalidate_page_bitmap(p);
1010 d720b93d bellard
        if (is_cpu_write_access) {
1011 2e70f6ef pbrook
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1012 d720b93d bellard
        }
1013 d720b93d bellard
    }
1014 d720b93d bellard
#endif
1015 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1016 d720b93d bellard
    if (current_tb_modified) {
1017 d720b93d bellard
        /* we generate a block containing just the instruction
1018 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1019 d720b93d bellard
           itself */
1020 ea1c1802 bellard
        env->current_tb = NULL;
1021 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1022 d720b93d bellard
        cpu_resume_from_signal(env, NULL);
1023 9fa3e853 bellard
    }
1024 fd6ce8f6 bellard
#endif
1025 9fa3e853 bellard
}
1026 fd6ce8f6 bellard
1027 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1028 c227f099 Anthony Liguori
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1029 9fa3e853 bellard
{
1030 9fa3e853 bellard
    PageDesc *p;
1031 9fa3e853 bellard
    int offset, b;
1032 59817ccb bellard
#if 0
1033 a4193c8a bellard
    if (1) {
1034 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1035 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1036 93fcfe39 aliguori
                  cpu_single_env->eip,
1037 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1038 59817ccb bellard
    }
1039 59817ccb bellard
#endif
1040 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1041 5fafdf24 ths
    if (!p)
1042 9fa3e853 bellard
        return;
1043 9fa3e853 bellard
    if (p->code_bitmap) {
1044 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1045 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1046 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1047 9fa3e853 bellard
            goto do_invalidate;
1048 9fa3e853 bellard
    } else {
1049 9fa3e853 bellard
    do_invalidate:
1050 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1051 9fa3e853 bellard
    }
1052 9fa3e853 bellard
}
1053 9fa3e853 bellard
1054 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1055 c227f099 Anthony Liguori
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1056 d720b93d bellard
                                    unsigned long pc, void *puc)
1057 9fa3e853 bellard
{
1058 6b917547 aliguori
    TranslationBlock *tb;
1059 9fa3e853 bellard
    PageDesc *p;
1060 6b917547 aliguori
    int n;
1061 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1062 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1063 d720b93d bellard
    CPUState *env = cpu_single_env;
1064 6b917547 aliguori
    int current_tb_modified = 0;
1065 6b917547 aliguori
    target_ulong current_pc = 0;
1066 6b917547 aliguori
    target_ulong current_cs_base = 0;
1067 6b917547 aliguori
    int current_flags = 0;
1068 d720b93d bellard
#endif
1069 9fa3e853 bellard
1070 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1071 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1072 5fafdf24 ths
    if (!p)
1073 9fa3e853 bellard
        return;
1074 9fa3e853 bellard
    tb = p->first_tb;
1075 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1076 d720b93d bellard
    if (tb && pc != 0) {
1077 d720b93d bellard
        current_tb = tb_find_pc(pc);
1078 d720b93d bellard
    }
1079 d720b93d bellard
#endif
1080 9fa3e853 bellard
    while (tb != NULL) {
1081 9fa3e853 bellard
        n = (long)tb & 3;
1082 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1083 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1084 d720b93d bellard
        if (current_tb == tb &&
1085 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1086 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1087 d720b93d bellard
                   its execution. We could be more precise by checking
1088 d720b93d bellard
                   that the modification is after the current PC, but it
1089 d720b93d bellard
                   would require a specialized function to partially
1090 d720b93d bellard
                   restore the CPU state */
1091 3b46e624 ths
1092 d720b93d bellard
            current_tb_modified = 1;
1093 d720b93d bellard
            cpu_restore_state(current_tb, env, pc, puc);
1094 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1095 6b917547 aliguori
                                 &current_flags);
1096 d720b93d bellard
        }
1097 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1098 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1099 9fa3e853 bellard
        tb = tb->page_next[n];
1100 9fa3e853 bellard
    }
1101 fd6ce8f6 bellard
    p->first_tb = NULL;
1102 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1103 d720b93d bellard
    if (current_tb_modified) {
1104 d720b93d bellard
        /* we generate a block containing just the instruction
1105 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1106 d720b93d bellard
           itself */
1107 ea1c1802 bellard
        env->current_tb = NULL;
1108 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1109 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1110 d720b93d bellard
    }
1111 d720b93d bellard
#endif
1112 fd6ce8f6 bellard
}
1113 9fa3e853 bellard
#endif
1114 fd6ce8f6 bellard
1115 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
1116 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1117 53a5960a pbrook
                                 unsigned int n, target_ulong page_addr)
1118 fd6ce8f6 bellard
{
1119 fd6ce8f6 bellard
    PageDesc *p;
1120 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1121 9fa3e853 bellard
1122 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1123 3a7d929e bellard
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1124 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1125 9fa3e853 bellard
    last_first_tb = p->first_tb;
1126 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1127 9fa3e853 bellard
    invalidate_page_bitmap(p);
1128 fd6ce8f6 bellard
1129 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1130 d720b93d bellard
1131 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1132 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1133 53a5960a pbrook
        target_ulong addr;
1134 53a5960a pbrook
        PageDesc *p2;
1135 9fa3e853 bellard
        int prot;
1136 9fa3e853 bellard
1137 fd6ce8f6 bellard
        /* force the host page as non writable (writes will have a
1138 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1139 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1140 fd6ce8f6 bellard
        prot = 0;
1141 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1142 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1143 53a5960a pbrook
1144 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1145 53a5960a pbrook
            if (!p2)
1146 53a5960a pbrook
                continue;
1147 53a5960a pbrook
            prot |= p2->flags;
1148 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1149 53a5960a pbrook
            page_get_flags(addr);
1150 53a5960a pbrook
        }
1151 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1152 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1153 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1154 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1155 53a5960a pbrook
               page_addr);
1156 fd6ce8f6 bellard
#endif
1157 fd6ce8f6 bellard
    }
1158 9fa3e853 bellard
#else
1159 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1160 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1161 9fa3e853 bellard
       allocated in a physical page */
1162 9fa3e853 bellard
    if (!last_first_tb) {
1163 6a00d601 bellard
        tlb_protect_code(page_addr);
1164 9fa3e853 bellard
    }
1165 9fa3e853 bellard
#endif
1166 d720b93d bellard
1167 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1168 fd6ce8f6 bellard
}
1169 fd6ce8f6 bellard
1170 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1171 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1172 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1173 fd6ce8f6 bellard
{
1174 fd6ce8f6 bellard
    TranslationBlock *tb;
1175 fd6ce8f6 bellard
1176 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1177 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1178 d4e8164f bellard
        return NULL;
1179 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1180 fd6ce8f6 bellard
    tb->pc = pc;
1181 b448f2f3 bellard
    tb->cflags = 0;
1182 d4e8164f bellard
    return tb;
1183 d4e8164f bellard
}
1184 d4e8164f bellard
1185 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
1186 2e70f6ef pbrook
{
1187 bf20dc07 ths
    /* In practice this is mostly used for single-use temporary TBs.
1188 2e70f6ef pbrook
       Ignore the hard cases and just back up if this TB happens to
1189 2e70f6ef pbrook
       be the last one generated.  */
1190 2e70f6ef pbrook
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1191 2e70f6ef pbrook
        code_gen_ptr = tb->tc_ptr;
1192 2e70f6ef pbrook
        nb_tbs--;
1193 2e70f6ef pbrook
    }
1194 2e70f6ef pbrook
}
1195 2e70f6ef pbrook
1196 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1197 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1198 5fafdf24 ths
void tb_link_phys(TranslationBlock *tb,
1199 9fa3e853 bellard
                  target_ulong phys_pc, target_ulong phys_page2)
1200 d4e8164f bellard
{
1201 9fa3e853 bellard
    unsigned int h;
1202 9fa3e853 bellard
    TranslationBlock **ptb;
1203 9fa3e853 bellard
1204 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1205 c8a706fe pbrook
       before we are done.  */
1206 c8a706fe pbrook
    mmap_lock();
1207 9fa3e853 bellard
    /* add in the physical hash table */
1208 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1209 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1210 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1211 9fa3e853 bellard
    *ptb = tb;
1212 fd6ce8f6 bellard
1213 fd6ce8f6 bellard
    /* add in the page list */
1214 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1215 9fa3e853 bellard
    if (phys_page2 != -1)
1216 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1217 9fa3e853 bellard
    else
1218 9fa3e853 bellard
        tb->page_addr[1] = -1;
1219 9fa3e853 bellard
1220 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1221 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1222 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1223 d4e8164f bellard
1224 d4e8164f bellard
    /* init original jump addresses */
1225 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1226 d4e8164f bellard
        tb_reset_jump(tb, 0);
1227 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1228 d4e8164f bellard
        tb_reset_jump(tb, 1);
1229 8a40a180 bellard
1230 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1231 8a40a180 bellard
    tb_page_check();
1232 8a40a180 bellard
#endif
1233 c8a706fe pbrook
    mmap_unlock();
1234 fd6ce8f6 bellard
}
1235 fd6ce8f6 bellard
1236 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1237 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1238 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1239 fd6ce8f6 bellard
{
1240 9fa3e853 bellard
    int m_min, m_max, m;
1241 9fa3e853 bellard
    unsigned long v;
1242 9fa3e853 bellard
    TranslationBlock *tb;
1243 a513fe19 bellard
1244 a513fe19 bellard
    if (nb_tbs <= 0)
1245 a513fe19 bellard
        return NULL;
1246 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1247 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1248 a513fe19 bellard
        return NULL;
1249 a513fe19 bellard
    /* binary search (cf Knuth) */
1250 a513fe19 bellard
    m_min = 0;
1251 a513fe19 bellard
    m_max = nb_tbs - 1;
1252 a513fe19 bellard
    while (m_min <= m_max) {
1253 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1254 a513fe19 bellard
        tb = &tbs[m];
1255 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1256 a513fe19 bellard
        if (v == tc_ptr)
1257 a513fe19 bellard
            return tb;
1258 a513fe19 bellard
        else if (tc_ptr < v) {
1259 a513fe19 bellard
            m_max = m - 1;
1260 a513fe19 bellard
        } else {
1261 a513fe19 bellard
            m_min = m + 1;
1262 a513fe19 bellard
        }
1263 5fafdf24 ths
    }
1264 a513fe19 bellard
    return &tbs[m_max];
1265 a513fe19 bellard
}
1266 7501267e bellard
1267 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1268 ea041c0e bellard
1269 ea041c0e bellard
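/* unlink the n-th outgoing jump of 'tb': remove tb from the destination
   TB's incoming-jump list, reset the patched jump in the generated code
   and recurse into the destination */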
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1270 ea041c0e bellard
{
1271 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1272 ea041c0e bellard
    unsigned int n1;
1273 ea041c0e bellard
1274 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1275 ea041c0e bellard
    if (tb1 != NULL) {
1276 ea041c0e bellard
        /* find head of list */
1277 ea041c0e bellard
        for(;;) {
1278 ea041c0e bellard
            n1 = (long)tb1 & 3;
1279 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1280 ea041c0e bellard
            if (n1 == 2)
1281 ea041c0e bellard
                break;
1282 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1283 ea041c0e bellard
        }
1284 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1285 ea041c0e bellard
        tb_next = tb1;
1286 ea041c0e bellard
1287 ea041c0e bellard
        /* remove tb from the jmp_first list */
1288 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1289 ea041c0e bellard
        for(;;) {
1290 ea041c0e bellard
            tb1 = *ptb;
1291 ea041c0e bellard
            n1 = (long)tb1 & 3;
1292 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1293 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1294 ea041c0e bellard
                break;
1295 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1296 ea041c0e bellard
        }
1297 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1298 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1299 3b46e624 ths
1300 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1301 ea041c0e bellard
        tb_reset_jump(tb, n);
1302 ea041c0e bellard
1303 0124311e bellard
        /* suppress jumps in the tb to which we could have jumped */
1304 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1305 ea041c0e bellard
    }
1306 ea041c0e bellard
}
1307 ea041c0e bellard
1308 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1309 ea041c0e bellard
{
1310 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1311 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1312 ea041c0e bellard
}
1313 ea041c0e bellard
1314 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1315 d720b93d bellard
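/* invalidate any TB containing the physical address of 'pc' so that
   newly generated code takes the breakpoint into account */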
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1316 d720b93d bellard
{
1317 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1318 9b3c35e0 j_mayer
    target_ulong pd;
1319 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1320 c2f07f81 pbrook
    PhysPageDesc *p;
1321 d720b93d bellard
1322 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1323 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1324 c2f07f81 pbrook
    if (!p) {
1325 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1326 c2f07f81 pbrook
    } else {
1327 c2f07f81 pbrook
        pd = p->phys_offset;
1328 c2f07f81 pbrook
    }
1329 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1330 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1331 d720b93d bellard
}
1332 c27004ec bellard
#endif
1333 d720b93d bellard
1334 6658ffb8 pbrook
/* Add a watchpoint.  */
1335 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1336 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1337 6658ffb8 pbrook
{
1338 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1339 c0ce998e aliguori
    CPUWatchpoint *wp;
1340 6658ffb8 pbrook
1341 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1342 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1343 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1344 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1345 b4051334 aliguori
        return -EINVAL;
1346 b4051334 aliguori
    }
1347 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1348 a1d1bb31 aliguori
1349 a1d1bb31 aliguori
    wp->vaddr = addr;
1350 b4051334 aliguori
    wp->len_mask = len_mask;
1351 a1d1bb31 aliguori
    wp->flags = flags;
1352 a1d1bb31 aliguori
1353 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1354 c0ce998e aliguori
    if (flags & BP_GDB)
1355 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1356 c0ce998e aliguori
    else
1357 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1358 6658ffb8 pbrook
1359 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1360 a1d1bb31 aliguori
1361 a1d1bb31 aliguori
    if (watchpoint)
1362 a1d1bb31 aliguori
        *watchpoint = wp;
1363 a1d1bb31 aliguori
    return 0;
1364 6658ffb8 pbrook
}
1365 6658ffb8 pbrook
1366 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1367 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1368 a1d1bb31 aliguori
                          int flags)
1369 6658ffb8 pbrook
{
1370 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1371 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1372 6658ffb8 pbrook
1373 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1374 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1375 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1376 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1377 6658ffb8 pbrook
            return 0;
1378 6658ffb8 pbrook
        }
1379 6658ffb8 pbrook
    }
1380 a1d1bb31 aliguori
    return -ENOENT;
1381 6658ffb8 pbrook
}
1382 6658ffb8 pbrook
1383 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1384 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1385 a1d1bb31 aliguori
{
1386 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1387 7d03f82f edgar_igl
1388 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1389 a1d1bb31 aliguori
1390 a1d1bb31 aliguori
    qemu_free(watchpoint);
1391 a1d1bb31 aliguori
}
1392 a1d1bb31 aliguori
1393 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1394 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1395 a1d1bb31 aliguori
{
1396 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1397 a1d1bb31 aliguori
1398 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1399 a1d1bb31 aliguori
        if (wp->flags & mask)
1400 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1401 c0ce998e aliguori
    }
1402 7d03f82f edgar_igl
}
1403 7d03f82f edgar_igl
1404 a1d1bb31 aliguori
/* Add a breakpoint.  */
1405 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1406 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1407 4c3a88a2 bellard
{
1408 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1409 c0ce998e aliguori
    CPUBreakpoint *bp;
1410 3b46e624 ths
1411 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1412 4c3a88a2 bellard
1413 a1d1bb31 aliguori
    bp->pc = pc;
1414 a1d1bb31 aliguori
    bp->flags = flags;
1415 a1d1bb31 aliguori
1416 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1417 c0ce998e aliguori
    if (flags & BP_GDB)
1418 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1419 c0ce998e aliguori
    else
1420 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1421 3b46e624 ths
1422 d720b93d bellard
    breakpoint_invalidate(env, pc);
1423 a1d1bb31 aliguori
1424 a1d1bb31 aliguori
    if (breakpoint)
1425 a1d1bb31 aliguori
        *breakpoint = bp;
1426 4c3a88a2 bellard
    return 0;
1427 4c3a88a2 bellard
#else
1428 a1d1bb31 aliguori
    return -ENOSYS;
1429 4c3a88a2 bellard
#endif
1430 4c3a88a2 bellard
}
1431 4c3a88a2 bellard
1432 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1433 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1434 a1d1bb31 aliguori
{
1435 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1436 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1437 a1d1bb31 aliguori
1438 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1439 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1440 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1441 a1d1bb31 aliguori
            return 0;
1442 a1d1bb31 aliguori
        }
1443 7d03f82f edgar_igl
    }
1444 a1d1bb31 aliguori
    return -ENOENT;
1445 a1d1bb31 aliguori
#else
1446 a1d1bb31 aliguori
    return -ENOSYS;
1447 7d03f82f edgar_igl
#endif
1448 7d03f82f edgar_igl
}
1449 7d03f82f edgar_igl
1450 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1451 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1452 4c3a88a2 bellard
{
1453 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1454 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1455 d720b93d bellard
1456 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1457 a1d1bb31 aliguori
1458 a1d1bb31 aliguori
    qemu_free(breakpoint);
1459 a1d1bb31 aliguori
#endif
1460 a1d1bb31 aliguori
}
1461 a1d1bb31 aliguori
1462 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1463 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1464 a1d1bb31 aliguori
{
1465 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1466 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1467 a1d1bb31 aliguori
1468 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1469 a1d1bb31 aliguori
        if (bp->flags & mask)
1470 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1471 c0ce998e aliguori
    }
1472 4c3a88a2 bellard
#endif
1473 4c3a88a2 bellard
}
1474 4c3a88a2 bellard
1475 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1476 c33a346e bellard
   CPU loop after each instruction */
1477 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1478 c33a346e bellard
{
1479 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1480 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1481 c33a346e bellard
        env->singlestep_enabled = enabled;
1482 e22a25c9 aliguori
        if (kvm_enabled())
1483 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1484 e22a25c9 aliguori
        else {
1485 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1486 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1487 e22a25c9 aliguori
            tb_flush(env);
1488 e22a25c9 aliguori
        }
1489 c33a346e bellard
    }
1490 c33a346e bellard
#endif
1491 c33a346e bellard
}
1492 c33a346e bellard
1493 34865134 bellard
/* enable or disable low levels log */
1494 34865134 bellard
void cpu_set_log(int log_flags)
1495 34865134 bellard
{
1496 34865134 bellard
    loglevel = log_flags;
1497 34865134 bellard
    if (loglevel && !logfile) {
1498 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1499 34865134 bellard
        if (!logfile) {
1500 34865134 bellard
            perror(logfilename);
1501 34865134 bellard
            _exit(1);
1502 34865134 bellard
        }
1503 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1504 9fa3e853 bellard
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1505 9fa3e853 bellard
        {
1506 b55266b5 blueswir1
            static char logfile_buf[4096];
1507 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1508 9fa3e853 bellard
        }
1509 bf65f53f Filip Navara
#elif !defined(_WIN32)
1510 bf65f53f Filip Navara
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1511 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1512 9fa3e853 bellard
#endif
1513 e735b91c pbrook
        log_append = 1;
1514 e735b91c pbrook
    }
1515 e735b91c pbrook
    if (!loglevel && logfile) {
1516 e735b91c pbrook
        fclose(logfile);
1517 e735b91c pbrook
        logfile = NULL;
1518 34865134 bellard
    }
1519 34865134 bellard
}
1520 34865134 bellard
1521 34865134 bellard
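/* set the log file name; an already open log file is closed and
   reopened by cpu_set_log() */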
void cpu_set_log_filename(const char *filename)
1522 34865134 bellard
{
1523 34865134 bellard
    logfilename = strdup(filename);
1524 e735b91c pbrook
    if (logfile) {
1525 e735b91c pbrook
        fclose(logfile);
1526 e735b91c pbrook
        logfile = NULL;
1527 e735b91c pbrook
    }
1528 e735b91c pbrook
    cpu_set_log(loglevel);
1529 34865134 bellard
}
1530 c33a346e bellard
1531 3098dba0 aurel32
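/* force the CPU out of its chained-TB execution: unlink the currently
   executing TB so that control returns to the main loop as soon as
   possible */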
static void cpu_unlink_tb(CPUState *env)
1532 ea041c0e bellard
{
1533 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1534 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1535 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds.  Often
1536 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
1537 ea041c0e bellard
    TranslationBlock *tb;
1538 c227f099 Anthony Liguori
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1539 59817ccb bellard
1540 3098dba0 aurel32
    tb = env->current_tb;
1541 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1542 3098dba0 aurel32
       all the potentially executing TB */
1543 f76cfe56 Riku Voipio
    if (tb) {
1544 f76cfe56 Riku Voipio
        spin_lock(&interrupt_lock);
1545 3098dba0 aurel32
        env->current_tb = NULL;
1546 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1547 f76cfe56 Riku Voipio
        spin_unlock(&interrupt_lock);
1548 be214e6c aurel32
    }
1549 3098dba0 aurel32
}
1550 3098dba0 aurel32
1551 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1552 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1553 3098dba0 aurel32
{
1554 3098dba0 aurel32
    int old_mask;
1555 be214e6c aurel32
1556 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1557 68a79315 bellard
    env->interrupt_request |= mask;
1558 3098dba0 aurel32
1559 8edac960 aliguori
#ifndef CONFIG_USER_ONLY
1560 8edac960 aliguori
    /*
1561 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1562 8edac960 aliguori
     * case it is halted.
1563 8edac960 aliguori
     */
1564 8edac960 aliguori
    if (!qemu_cpu_self(env)) {
1565 8edac960 aliguori
        qemu_cpu_kick(env);
1566 8edac960 aliguori
        return;
1567 8edac960 aliguori
    }
1568 8edac960 aliguori
#endif
1569 8edac960 aliguori
1570 2e70f6ef pbrook
    if (use_icount) {
1571 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1572 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1573 2e70f6ef pbrook
        if (!can_do_io(env)
1574 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1575 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1576 2e70f6ef pbrook
        }
1577 2e70f6ef pbrook
#endif
1578 2e70f6ef pbrook
    } else {
1579 3098dba0 aurel32
        cpu_unlink_tb(env);
1580 ea041c0e bellard
    }
1581 ea041c0e bellard
}
1582 ea041c0e bellard
1583 b54ad049 bellard
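/* clear the given bits from the pending interrupt request mask */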
void cpu_reset_interrupt(CPUState *env, int mask)
1584 b54ad049 bellard
{
1585 b54ad049 bellard
    env->interrupt_request &= ~mask;
1586 b54ad049 bellard
}
1587 b54ad049 bellard
1588 3098dba0 aurel32
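/* ask the CPU to stop executing translated code and return to the main
   loop as soon as possible */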
void cpu_exit(CPUState *env)
1589 3098dba0 aurel32
{
1590 3098dba0 aurel32
    env->exit_request = 1;
1591 3098dba0 aurel32
    cpu_unlink_tb(env);
1592 3098dba0 aurel32
}
1593 3098dba0 aurel32
1594 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1595 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1596 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1597 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1598 f193c797 bellard
      "show target assembly code for each compiled TB" },
1599 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1600 57fec1fe bellard
      "show micro ops for each compiled TB" },
1601 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1602 e01a1157 blueswir1
      "show micro ops "
1603 e01a1157 blueswir1
#ifdef TARGET_I386
1604 e01a1157 blueswir1
      "before eflags optimization and "
1605 f193c797 bellard
#endif
1606 e01a1157 blueswir1
      "after liveness analysis" },
1607 f193c797 bellard
    { CPU_LOG_INT, "int",
1608 f193c797 bellard
      "show interrupts/exceptions in short format" },
1609 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1610 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1611 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1612 e91c8a77 ths
      "show CPU state before block translation" },
1613 f193c797 bellard
#ifdef TARGET_I386
1614 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1615 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1616 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1617 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1618 f193c797 bellard
#endif
1619 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1620 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1621 fd872598 bellard
      "show all i/o ports accesses" },
1622 8e3a9fd2 bellard
#endif
1623 f193c797 bellard
    { 0, NULL, NULL },
1624 f193c797 bellard
};
1625 f193c797 bellard
1626 f193c797 bellard
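/* return true if the n-character token starting at s1 matches the
   string s2 exactly */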
static int cmp1(const char *s1, int n, const char *s2)
1627 f193c797 bellard
{
1628 f193c797 bellard
    if (strlen(s2) != n)
1629 f193c797 bellard
        return 0;
1630 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1631 f193c797 bellard
}
1632 3b46e624 ths
1633 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
1634 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1635 f193c797 bellard
{
1636 c7cd6a37 blueswir1
    const CPULogItem *item;
1637 f193c797 bellard
    int mask;
1638 f193c797 bellard
    const char *p, *p1;
1639 f193c797 bellard
1640 f193c797 bellard
    p = str;
1641 f193c797 bellard
    mask = 0;
1642 f193c797 bellard
    for(;;) {
1643 f193c797 bellard
        p1 = strchr(p, ',');
1644 f193c797 bellard
        if (!p1)
1645 f193c797 bellard
            p1 = p + strlen(p);
1646 8e3a9fd2 bellard
        if (cmp1(p, p1 - p, "all")) {
1647 8e3a9fd2 bellard
            for(item = cpu_log_items; item->mask != 0; item++) {
1648 8e3a9fd2 bellard
                mask |= item->mask;
1649 8e3a9fd2 bellard
            }
1650 8e3a9fd2 bellard
        } else {
1651 f193c797 bellard
            for(item = cpu_log_items; item->mask != 0; item++) {
1652 f193c797 bellard
                if (cmp1(p, p1 - p, item->name))
1653 f193c797 bellard
                    goto found;
1654 f193c797 bellard
            }
1655 f193c797 bellard
            return 0;
1656 8e3a9fd2 bellard
        }
1657 f193c797 bellard
    found:
1658 f193c797 bellard
        mask |= item->mask;
1659 f193c797 bellard
        if (*p1 != ',')
1660 f193c797 bellard
            break;
1661 f193c797 bellard
        p = p1 + 1;
1662 f193c797 bellard
    }
1663 f193c797 bellard
    return mask;
1664 f193c797 bellard
}
1665 ea041c0e bellard
1666 7501267e bellard
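/* report a fatal emulator error: print the message and the CPU state to
   stderr (and to the log file if logging is enabled), then abort() */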
void cpu_abort(CPUState *env, const char *fmt, ...)
1667 7501267e bellard
{
1668 7501267e bellard
    va_list ap;
1669 493ae1f0 pbrook
    va_list ap2;
1670 7501267e bellard
1671 7501267e bellard
    va_start(ap, fmt);
1672 493ae1f0 pbrook
    va_copy(ap2, ap);
1673 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1674 7501267e bellard
    vfprintf(stderr, fmt, ap);
1675 7501267e bellard
    fprintf(stderr, "\n");
1676 7501267e bellard
#ifdef TARGET_I386
1677 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1678 7fe48483 bellard
#else
1679 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1680 7501267e bellard
#endif
1681 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1682 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1683 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1684 93fcfe39 aliguori
        qemu_log("\n");
1685 f9373291 j_mayer
#ifdef TARGET_I386
1686 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1687 f9373291 j_mayer
#else
1688 93fcfe39 aliguori
        log_cpu_state(env, 0);
1689 f9373291 j_mayer
#endif
1690 31b1a7b4 aliguori
        qemu_log_flush();
1691 93fcfe39 aliguori
        qemu_log_close();
1692 924edcae balrog
    }
1693 493ae1f0 pbrook
    va_end(ap2);
1694 f9373291 j_mayer
    va_end(ap);
1695 7501267e bellard
    abort();
1696 7501267e bellard
}
1697 7501267e bellard
1698 c5be9f08 ths
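/* create a new CPUState that is a copy of 'env'; chaining and cpu_index
   of the new CPU are preserved and the debug break/watchpoints are
   cloned into it */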
CPUState *cpu_copy(CPUState *env)
1699 c5be9f08 ths
{
1700 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1701 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1702 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1703 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1704 5a38f081 aliguori
    CPUBreakpoint *bp;
1705 5a38f081 aliguori
    CPUWatchpoint *wp;
1706 5a38f081 aliguori
#endif
1707 5a38f081 aliguori
1708 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1709 5a38f081 aliguori
1710 5a38f081 aliguori
    /* Preserve chaining and index. */
1711 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1712 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1713 5a38f081 aliguori
1714 5a38f081 aliguori
    /* Clone all break/watchpoints.
1715 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1716 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1717 72cf2d4f Blue Swirl
    QTAILQ_INIT(&new_env->breakpoints);
1718 72cf2d4f Blue Swirl
    QTAILQ_INIT(&new_env->watchpoints);
1719 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1720 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1721 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1722 5a38f081 aliguori
    }
1723 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1724 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1725 5a38f081 aliguori
                              wp->flags, NULL);
1726 5a38f081 aliguori
    }
1727 5a38f081 aliguori
#endif
1728 5a38f081 aliguori
1729 c5be9f08 ths
    return new_env;
1730 c5be9f08 ths
}
1731 c5be9f08 ths
1732 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1733 0124311e bellard
1734 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1735 5c751e99 edgar_igl
{
1736 5c751e99 edgar_igl
    unsigned int i;
1737 5c751e99 edgar_igl
1738 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1739 5c751e99 edgar_igl
       overlap the flushed page.  */
1740 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1741 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1742 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1743 5c751e99 edgar_igl
1744 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1745 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1746 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1747 5c751e99 edgar_igl
}
1748 5c751e99 edgar_igl
1749 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1750 08738984 Igor Kovalenko
    .addr_read  = -1,
1751 08738984 Igor Kovalenko
    .addr_write = -1,
1752 08738984 Igor Kovalenko
    .addr_code  = -1,
1753 08738984 Igor Kovalenko
    .addend     = -1,
1754 08738984 Igor Kovalenko
};
1755 08738984 Igor Kovalenko
1756 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1757 ee8b7021 bellard
   implemented yet) */
1758 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1759 33417e70 bellard
{
1760 33417e70 bellard
    int i;
1761 0124311e bellard
1762 9fa3e853 bellard
#if defined(DEBUG_TLB)
1763 9fa3e853 bellard
    printf("tlb_flush:\n");
1764 9fa3e853 bellard
#endif
1765 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1766 0124311e bellard
       links while we are modifying them */
1767 0124311e bellard
    env->current_tb = NULL;
1768 0124311e bellard
1769 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1770 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1771 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1772 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1773 cfde4bd9 Isaku Yamahata
        }
1774 33417e70 bellard
    }
1775 9fa3e853 bellard
1776 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1777 9fa3e853 bellard
1778 e3db7226 bellard
    tlb_flush_count++;
1779 33417e70 bellard
}
1780 33417e70 bellard
1781 274da6b2 bellard
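/* invalidate a single TLB entry if any of its read/write/code addresses
   matches the given virtual address */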
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1782 61382a50 bellard
{
1783 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1784 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1785 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1786 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1787 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1788 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1789 08738984 Igor Kovalenko
        *tlb_entry = s_cputlb_empty_entry;
1790 84b7b8e7 bellard
    }
1791 61382a50 bellard
}
1792 61382a50 bellard
1793 2e12669a bellard
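/* flush, in every MMU mode, the TLB entry that maps the virtual page
   containing 'addr', and discard the related jump cache entries */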
void tlb_flush_page(CPUState *env, target_ulong addr)
1794 33417e70 bellard
{
1795 8a40a180 bellard
    int i;
1796 cfde4bd9 Isaku Yamahata
    int mmu_idx;
1797 0124311e bellard
1798 9fa3e853 bellard
#if defined(DEBUG_TLB)
1799 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1800 9fa3e853 bellard
#endif
1801 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1802 0124311e bellard
       links while we are modifying them */
1803 0124311e bellard
    env->current_tb = NULL;
1804 61382a50 bellard
1805 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
1806 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1807 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1808 cfde4bd9 Isaku Yamahata
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1809 0124311e bellard
1810 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
1811 9fa3e853 bellard
}
1812 9fa3e853 bellard
1813 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1814 9fa3e853 bellard
   can be detected */
1815 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
1816 9fa3e853 bellard
{
1817 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1818 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1819 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1820 9fa3e853 bellard
}
1821 9fa3e853 bellard
1822 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1823 3a7d929e bellard
   tested for self modifying code */
1824 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1825 3a7d929e bellard
                                    target_ulong vaddr)
1826 9fa3e853 bellard
{
1827 3a7d929e bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1828 1ccde1cb bellard
}
1829 1ccde1cb bellard
1830 5fafdf24 ths
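/* if a RAM TLB entry falls inside [start, start + length), mark it
   TLB_NOTDIRTY so that the next write goes through the slow path again */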
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1831 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
1832 1ccde1cb bellard
{
1833 1ccde1cb bellard
    unsigned long addr;
1834 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1835 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1836 1ccde1cb bellard
        if ((addr - start) < length) {
1837 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1838 1ccde1cb bellard
        }
1839 1ccde1cb bellard
    }
1840 1ccde1cb bellard
}
1841 1ccde1cb bellard
1842 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
1843 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1844 0a962c02 bellard
                                     int dirty_flags)
1845 1ccde1cb bellard
{
1846 1ccde1cb bellard
    CPUState *env;
1847 4f2ac237 bellard
    unsigned long length, start1;
1848 0a962c02 bellard
    int i, mask, len;
1849 0a962c02 bellard
    uint8_t *p;
1850 1ccde1cb bellard
1851 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
1852 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
1853 1ccde1cb bellard
1854 1ccde1cb bellard
    length = end - start;
1855 1ccde1cb bellard
    if (length == 0)
1856 1ccde1cb bellard
        return;
1857 0a962c02 bellard
    len = length >> TARGET_PAGE_BITS;
1858 f23db169 bellard
    mask = ~dirty_flags;
1859 f23db169 bellard
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1860 f23db169 bellard
    for(i = 0; i < len; i++)
1861 f23db169 bellard
        p[i] &= mask;
1862 f23db169 bellard
1863 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
1864 1ccde1cb bellard
       when accessing the range */
1865 5579c7f3 pbrook
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1866 5579c7f3 pbrook
    /* Check that we don't span multiple blocks - this breaks the
1867 5579c7f3 pbrook
       address comparisons below.  */
1868 5579c7f3 pbrook
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1869 5579c7f3 pbrook
            != (end - 1) - start) {
1870 5579c7f3 pbrook
        abort();
1871 5579c7f3 pbrook
    }
1872 5579c7f3 pbrook
1873 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1874 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1875 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1876 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
1877 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1878 cfde4bd9 Isaku Yamahata
                                      start1, length);
1879 cfde4bd9 Isaku Yamahata
        }
1880 6a00d601 bellard
    }
1881 1ccde1cb bellard
}
1882 1ccde1cb bellard
1883 74576198 aliguori
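/* enable or disable dirty memory tracking (the in_migration flag); when
   running under KVM the kernel migration log is switched as well */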
int cpu_physical_memory_set_dirty_tracking(int enable)
1884 74576198 aliguori
{
1885 74576198 aliguori
    in_migration = enable;
1886 b0a46a33 Jan Kiszka
    if (kvm_enabled()) {
1887 b0a46a33 Jan Kiszka
        return kvm_set_migration_log(enable);
1888 b0a46a33 Jan Kiszka
    }
1889 74576198 aliguori
    return 0;
1890 74576198 aliguori
}
1891 74576198 aliguori
1892 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
1893 74576198 aliguori
{
1894 74576198 aliguori
    return in_migration;
1895 74576198 aliguori
}
1896 74576198 aliguori
1897 c227f099 Anthony Liguori
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1898 c227f099 Anthony Liguori
                                   target_phys_addr_t end_addr)
1899 2bec46dc aliguori
{
1900 151f7749 Jan Kiszka
    int ret = 0;
1901 151f7749 Jan Kiszka
1902 2bec46dc aliguori
    if (kvm_enabled())
1903 151f7749 Jan Kiszka
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1904 151f7749 Jan Kiszka
    return ret;
1905 2bec46dc aliguori
}
1906 2bec46dc aliguori
1907 3a7d929e bellard
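/* re-arm the dirty trap for a RAM TLB entry whose backing page is no
   longer marked dirty */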
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1908 3a7d929e bellard
{
1909 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1910 5579c7f3 pbrook
    void *p;
1911 3a7d929e bellard
1912 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1913 5579c7f3 pbrook
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1914 5579c7f3 pbrook
            + tlb_entry->addend);
1915 5579c7f3 pbrook
        ram_addr = qemu_ram_addr_from_host(p);
1916 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1917 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1918 3a7d929e bellard
        }
1919 3a7d929e bellard
    }
1920 3a7d929e bellard
}
1921 3a7d929e bellard
1922 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
1923 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
1924 3a7d929e bellard
{
1925 3a7d929e bellard
    int i;
1926 cfde4bd9 Isaku Yamahata
    int mmu_idx;
1927 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1928 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
1929 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1930 cfde4bd9 Isaku Yamahata
    }
1931 3a7d929e bellard
}
1932 3a7d929e bellard
1933 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1934 1ccde1cb bellard
{
1935 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1936 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
1937 1ccde1cb bellard
}
1938 1ccde1cb bellard
1939 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
1940 0f459d16 pbrook
   so that it is no longer dirty */
1941 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1942 1ccde1cb bellard
{
1943 1ccde1cb bellard
    int i;
1944 cfde4bd9 Isaku Yamahata
    int mmu_idx;
1945 1ccde1cb bellard
1946 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
1947 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1948 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1949 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1950 9fa3e853 bellard
}
1951 9fa3e853 bellard
1952 59817ccb bellard
/* add a new TLB entry. At most one entry for a given virtual address
1953 59817ccb bellard
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1954 59817ccb bellard
   (can only happen in non SOFTMMU mode for I/O pages or pages
1955 59817ccb bellard
   conflicting with the host address space). */
1956 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1957 c227f099 Anthony Liguori
                      target_phys_addr_t paddr, int prot,
1958 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
1959 9fa3e853 bellard
{
1960 92e873b9 bellard
    PhysPageDesc *p;
1961 4f2ac237 bellard
    unsigned long pd;
1962 9fa3e853 bellard
    unsigned int index;
1963 4f2ac237 bellard
    target_ulong address;
1964 0f459d16 pbrook
    target_ulong code_address;
1965 c227f099 Anthony Liguori
    target_phys_addr_t addend;
1966 9fa3e853 bellard
    int ret;
1967 84b7b8e7 bellard
    CPUTLBEntry *te;
1968 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1969 c227f099 Anthony Liguori
    target_phys_addr_t iotlb;
1970 9fa3e853 bellard
1971 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1972 9fa3e853 bellard
    if (!p) {
1973 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
1974 9fa3e853 bellard
    } else {
1975 9fa3e853 bellard
        pd = p->phys_offset;
1976 9fa3e853 bellard
    }
1977 9fa3e853 bellard
#if defined(DEBUG_TLB)
1978 6ebbf390 j_mayer
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1979 6ebbf390 j_mayer
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1980 9fa3e853 bellard
#endif
1981 9fa3e853 bellard
1982 9fa3e853 bellard
    ret = 0;
1983 0f459d16 pbrook
    address = vaddr;
1984 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1985 0f459d16 pbrook
        /* IO memory case (romd handled later) */
1986 0f459d16 pbrook
        address |= TLB_MMIO;
1987 0f459d16 pbrook
    }
1988 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1989 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1990 0f459d16 pbrook
        /* Normal RAM.  */
1991 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
1992 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1993 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
1994 0f459d16 pbrook
        else
1995 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
1996 0f459d16 pbrook
    } else {
1997 ccbb4d44 Stuart Brady
        /* IO handlers are currently passed a physical address.
1998 0f459d16 pbrook
           It would be nice to pass an offset from the base address
1999 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2000 0f459d16 pbrook
           and avoid full address decoding in every device.
2001 0f459d16 pbrook
           We can't use the high bits of pd for this because
2002 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2003 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2004 8da3ff18 pbrook
        if (p) {
2005 8da3ff18 pbrook
            iotlb += p->region_offset;
2006 8da3ff18 pbrook
        } else {
2007 8da3ff18 pbrook
            iotlb += paddr;
2008 8da3ff18 pbrook
        }
2009 0f459d16 pbrook
    }
2010 0f459d16 pbrook
2011 0f459d16 pbrook
    code_address = address;
2012 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2013 0f459d16 pbrook
       watchpoint trap routines.  */
2014 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2015 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2016 0f459d16 pbrook
            iotlb = io_mem_watch + paddr;
2017 0f459d16 pbrook
            /* TODO: The memory case can be optimized by not trapping
2018 0f459d16 pbrook
               reads of pages with a write breakpoint.  */
2019 0f459d16 pbrook
            address |= TLB_MMIO;
2020 6658ffb8 pbrook
        }
2021 0f459d16 pbrook
    }
2022 d79acba4 balrog
2023 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2024 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2025 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2026 0f459d16 pbrook
    te->addend = addend - vaddr;
2027 0f459d16 pbrook
    if (prot & PAGE_READ) {
2028 0f459d16 pbrook
        te->addr_read = address;
2029 0f459d16 pbrook
    } else {
2030 0f459d16 pbrook
        te->addr_read = -1;
2031 0f459d16 pbrook
    }
2032 5c751e99 edgar_igl
2033 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2034 0f459d16 pbrook
        te->addr_code = code_address;
2035 0f459d16 pbrook
    } else {
2036 0f459d16 pbrook
        te->addr_code = -1;
2037 0f459d16 pbrook
    }
2038 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2039 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2040 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2041 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2042 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2043 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2044 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2045 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2046 9fa3e853 bellard
        } else {
2047 0f459d16 pbrook
            te->addr_write = address;
2048 9fa3e853 bellard
        }
2049 0f459d16 pbrook
    } else {
2050 0f459d16 pbrook
        te->addr_write = -1;
2051 9fa3e853 bellard
    }
2052 9fa3e853 bellard
    return ret;
2053 9fa3e853 bellard
}
2054 9fa3e853 bellard
2055 0124311e bellard
#else
2056 0124311e bellard
2057 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2058 0124311e bellard
{
2059 0124311e bellard
}
2060 0124311e bellard
2061 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2062 0124311e bellard
{
2063 0124311e bellard
}
2064 0124311e bellard
2065 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2066 c227f099 Anthony Liguori
                      target_phys_addr_t paddr, int prot,
2067 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2068 9fa3e853 bellard
{
2069 9fa3e853 bellard
    return 0;
2070 9fa3e853 bellard
}
2071 0124311e bellard
2072 edf8e2af Mika Westerberg
/*
2073 edf8e2af Mika Westerberg
 * Walks guest process memory "regions" one by one
2074 edf8e2af Mika Westerberg
 * and calls callback function 'fn' for each region.
2075 edf8e2af Mika Westerberg
 */
2076 edf8e2af Mika Westerberg
int walk_memory_regions(void *priv,
2077 edf8e2af Mika Westerberg
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2078 33417e70 bellard
{
2079 9fa3e853 bellard
    unsigned long start, end;
2080 edf8e2af Mika Westerberg
    PageDesc *p = NULL;
2081 9fa3e853 bellard
    int i, j, prot, prot1;
2082 edf8e2af Mika Westerberg
    int rc = 0;
2083 33417e70 bellard
2084 edf8e2af Mika Westerberg
    start = end = -1;
2085 9fa3e853 bellard
    prot = 0;
2086 edf8e2af Mika Westerberg
2087 edf8e2af Mika Westerberg
    for (i = 0; i <= L1_SIZE; i++) {
2088 edf8e2af Mika Westerberg
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2089 edf8e2af Mika Westerberg
        for (j = 0; j < L2_SIZE; j++) {
2090 edf8e2af Mika Westerberg
            prot1 = (p == NULL) ? 0 : p[j].flags;
2091 edf8e2af Mika Westerberg
            /*
2092 edf8e2af Mika Westerberg
             * "region" is one continuous chunk of memory
2093 edf8e2af Mika Westerberg
             * that has the same protection flags set.
2094 edf8e2af Mika Westerberg
             */
2095 9fa3e853 bellard
            if (prot1 != prot) {
2096 9fa3e853 bellard
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2097 9fa3e853 bellard
                if (start != -1) {
2098 edf8e2af Mika Westerberg
                    rc = (*fn)(priv, start, end, prot);
2099 edf8e2af Mika Westerberg
                    /* callback can stop iteration by returning != 0 */
2100 edf8e2af Mika Westerberg
                    if (rc != 0)
2101 edf8e2af Mika Westerberg
                        return (rc);
2102 9fa3e853 bellard
                }
2103 9fa3e853 bellard
                if (prot1 != 0)
2104 9fa3e853 bellard
                    start = end;
2105 9fa3e853 bellard
                else
2106 9fa3e853 bellard
                    start = -1;
2107 9fa3e853 bellard
                prot = prot1;
2108 9fa3e853 bellard
            }
2109 edf8e2af Mika Westerberg
            if (p == NULL)
2110 9fa3e853 bellard
                break;
2111 9fa3e853 bellard
        }
2112 33417e70 bellard
    }
2113 edf8e2af Mika Westerberg
    return (rc);
2114 edf8e2af Mika Westerberg
}
2115 edf8e2af Mika Westerberg
2116 edf8e2af Mika Westerberg
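/* walk_memory_regions() callback used by page_dump(): print one memory
   region with its size and protection bits */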
static int dump_region(void *priv, unsigned long start,
2117 edf8e2af Mika Westerberg
    unsigned long end, unsigned long prot)
2118 edf8e2af Mika Westerberg
{
2119 edf8e2af Mika Westerberg
    FILE *f = (FILE *)priv;
2120 edf8e2af Mika Westerberg
2121 edf8e2af Mika Westerberg
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2122 edf8e2af Mika Westerberg
        start, end, end - start,
2123 edf8e2af Mika Westerberg
        ((prot & PAGE_READ) ? 'r' : '-'),
2124 edf8e2af Mika Westerberg
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2125 edf8e2af Mika Westerberg
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2126 edf8e2af Mika Westerberg
2127 edf8e2af Mika Westerberg
    return (0);
2128 edf8e2af Mika Westerberg
}
2129 edf8e2af Mika Westerberg
2130 edf8e2af Mika Westerberg
/* dump memory mappings */
2131 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2132 edf8e2af Mika Westerberg
{
2133 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2134 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2135 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2136 33417e70 bellard
}
2137 33417e70 bellard
2138 53a5960a pbrook
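/* return the flags of the guest page containing 'address', or 0 if the
   page is unknown */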
int page_get_flags(target_ulong address)
2139 33417e70 bellard
{
2140 9fa3e853 bellard
    PageDesc *p;
2141 9fa3e853 bellard
2142 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2143 33417e70 bellard
    if (!p)
2144 9fa3e853 bellard
        return 0;
2145 9fa3e853 bellard
    return p->flags;
2146 9fa3e853 bellard
}
2147 9fa3e853 bellard
2148 9fa3e853 bellard
/* modify the flags of a page and invalidate the code if
2149 ccbb4d44 Stuart Brady
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2150 9fa3e853 bellard
   depending on PAGE_WRITE */
2151 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2152 9fa3e853 bellard
{
2153 9fa3e853 bellard
    PageDesc *p;
2154 53a5960a pbrook
    target_ulong addr;
2155 9fa3e853 bellard
2156 c8a706fe pbrook
    /* mmap_lock should already be held.  */
2157 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2158 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2159 9fa3e853 bellard
    if (flags & PAGE_WRITE)
2160 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2161 9fa3e853 bellard
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2162 9fa3e853 bellard
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2163 17e2377a pbrook
        /* We may be called for host regions that are outside guest
2164 17e2377a pbrook
           address space.  */
2165 17e2377a pbrook
        if (!p)
2166 17e2377a pbrook
            return;
2167 9fa3e853 bellard
        /* if the write protection is set, then we invalidate the code
2168 9fa3e853 bellard
           inside */
2169 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2170 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2171 9fa3e853 bellard
            p->first_tb) {
2172 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2173 9fa3e853 bellard
        }
2174 9fa3e853 bellard
        p->flags = flags;
2175 9fa3e853 bellard
    }
2176 33417e70 bellard
}
2177 33417e70 bellard
2178 3d97b40b ths
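/* check that the guest address range [start, start + len) is valid and
   allows the requested access; write-protected pages containing
   translated code are unprotected on demand. Return 0 on success,
   -1 on failure. */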
int page_check_range(target_ulong start, target_ulong len, int flags)
2179 3d97b40b ths
{
2180 3d97b40b ths
    PageDesc *p;
2181 3d97b40b ths
    target_ulong end;
2182 3d97b40b ths
    target_ulong addr;
2183 3d97b40b ths
2184 55f280c9 balrog
    if (start + len < start)
2185 55f280c9 balrog
        /* we've wrapped around */
2186 55f280c9 balrog
        return -1;
2187 55f280c9 balrog
2188 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2189 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2190 3d97b40b ths
2191 3d97b40b ths
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2192 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2193 3d97b40b ths
        if( !p )
2194 3d97b40b ths
            return -1;
2195 3d97b40b ths
        if( !(p->flags & PAGE_VALID) )
2196 3d97b40b ths
            return -1;
2197 3d97b40b ths
2198 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2199 3d97b40b ths
            return -1;
2200 dae3270c bellard
        if (flags & PAGE_WRITE) {
2201 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2202 dae3270c bellard
                return -1;
2203 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2204 dae3270c bellard
               contains translated code */
2205 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2206 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2207 dae3270c bellard
                    return -1;
2208 dae3270c bellard
            }
2209 dae3270c bellard
            return 0;
2210 dae3270c bellard
        }
2211 3d97b40b ths
    }
2212 3d97b40b ths
    return 0;
2213 3d97b40b ths
}
2214 3d97b40b ths
2215 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2216 ccbb4d44 Stuart Brady
   page. Return TRUE if the fault was successfully handled. */
2217 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2218 9fa3e853 bellard
{
2219 9fa3e853 bellard
    unsigned int page_index, prot, pindex;
2220 9fa3e853 bellard
    PageDesc *p, *p1;
2221 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2222 9fa3e853 bellard
2223 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2224 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2225 c8a706fe pbrook
       practice it seems to be ok.  */
2226 c8a706fe pbrook
    mmap_lock();
2227 c8a706fe pbrook
2228 83fb7adf bellard
    host_start = address & qemu_host_page_mask;
2229 9fa3e853 bellard
    page_index = host_start >> TARGET_PAGE_BITS;
2230 9fa3e853 bellard
    p1 = page_find(page_index);
2231 c8a706fe pbrook
    if (!p1) {
2232 c8a706fe pbrook
        mmap_unlock();
2233 9fa3e853 bellard
        return 0;
2234 c8a706fe pbrook
    }
2235 83fb7adf bellard
    host_end = host_start + qemu_host_page_size;
2236 9fa3e853 bellard
    p = p1;
2237 9fa3e853 bellard
    prot = 0;
2238 9fa3e853 bellard
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2239 9fa3e853 bellard
        prot |= p->flags;
2240 9fa3e853 bellard
        p++;
2241 9fa3e853 bellard
    }
2242 9fa3e853 bellard
    /* if the page was really writable, then we change its
2243 9fa3e853 bellard
       protection back to writable */
2244 9fa3e853 bellard
    if (prot & PAGE_WRITE_ORG) {
2245 9fa3e853 bellard
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2246 9fa3e853 bellard
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2247 5fafdf24 ths
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2248 9fa3e853 bellard
                     (prot & PAGE_BITS) | PAGE_WRITE);
2249 9fa3e853 bellard
            p1[pindex].flags |= PAGE_WRITE;
2250 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2251 9fa3e853 bellard
               the corresponding translated code. */
2252 d720b93d bellard
            tb_invalidate_phys_page(address, pc, puc);
2253 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2254 9fa3e853 bellard
            tb_invalidate_check(address);
2255 9fa3e853 bellard
#endif
2256 c8a706fe pbrook
            mmap_unlock();
2257 9fa3e853 bellard
            return 1;
2258 9fa3e853 bellard
        }
2259 9fa3e853 bellard
    }
2260 c8a706fe pbrook
    mmap_unlock();
2261 9fa3e853 bellard
    return 0;
2262 9fa3e853 bellard
}
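/* Illustrative sketch (editor's example, not from this file): the
   platform-specific SEGV handler is expected to try page_unprotect() before
   treating a write fault as a real guest fault, roughly

       if (is_write && page_unprotect(h2g(fault_address), pc, puc)) {
           return 1;   /* protection restored, TBs invalidated: retry the store */
       }

   fault_address, pc and puc stand for the values the host signal handler
   receives and h2g() for the host-to-guest address translation; the exact
   plumbing lives outside this file. */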
2263 9fa3e853 bellard
2264 6a00d601 bellard
static inline void tlb_set_dirty(CPUState *env,
2265 6a00d601 bellard
                                 unsigned long addr, target_ulong vaddr)
2266 1ccde1cb bellard
{
2267 1ccde1cb bellard
}
2268 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2269 9fa3e853 bellard
2270 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2271 8da3ff18 pbrook
2272 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2273 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset);
2274 c227f099 Anthony Liguori
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2275 c227f099 Anthony Liguori
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2276 db7b5426 blueswir1
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2277 db7b5426 blueswir1
                      need_subpage)                                     \
2278 db7b5426 blueswir1
    do {                                                                \
2279 db7b5426 blueswir1
        if (addr > start_addr)                                          \
2280 db7b5426 blueswir1
            start_addr2 = 0;                                            \
2281 db7b5426 blueswir1
        else {                                                          \
2282 db7b5426 blueswir1
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2283 db7b5426 blueswir1
            if (start_addr2 > 0)                                        \
2284 db7b5426 blueswir1
                need_subpage = 1;                                       \
2285 db7b5426 blueswir1
        }                                                               \
2286 db7b5426 blueswir1
                                                                        \
2287 49e9fba2 blueswir1
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2288 db7b5426 blueswir1
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2289 db7b5426 blueswir1
        else {                                                          \
2290 db7b5426 blueswir1
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2291 db7b5426 blueswir1
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2292 db7b5426 blueswir1
                need_subpage = 1;                                       \
2293 db7b5426 blueswir1
        }                                                               \
2294 db7b5426 blueswir1
    } while (0)
2295 db7b5426 blueswir1
2296 8f2498f9 Michael S. Tsirkin
/* register physical memory.
2297 8f2498f9 Michael S. Tsirkin
   For RAM, 'size' must be a multiple of the target page size.
2298 8f2498f9 Michael S. Tsirkin
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2299 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2300 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2301 ccbb4d44 Stuart Brady
   start_addr and region_offset are rounded down to a page boundary
2302 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2303 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
2304 c227f099 Anthony Liguori
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2305 c227f099 Anthony Liguori
                                         ram_addr_t size,
2306 c227f099 Anthony Liguori
                                         ram_addr_t phys_offset,
2307 c227f099 Anthony Liguori
                                         ram_addr_t region_offset)
2308 33417e70 bellard
{
2309 c227f099 Anthony Liguori
    target_phys_addr_t addr, end_addr;
2310 92e873b9 bellard
    PhysPageDesc *p;
2311 9d42037b bellard
    CPUState *env;
2312 c227f099 Anthony Liguori
    ram_addr_t orig_size = size;
2313 db7b5426 blueswir1
    void *subpage;
2314 33417e70 bellard
2315 7ba1e619 aliguori
    if (kvm_enabled())
2316 7ba1e619 aliguori
        kvm_set_phys_mem(start_addr, size, phys_offset);
2317 7ba1e619 aliguori
2318 67c4d23c pbrook
    if (phys_offset == IO_MEM_UNASSIGNED) {
2319 67c4d23c pbrook
        region_offset = start_addr;
2320 67c4d23c pbrook
    }
2321 8da3ff18 pbrook
    region_offset &= TARGET_PAGE_MASK;
2322 5fd386f6 bellard
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2323 c227f099 Anthony Liguori
    end_addr = start_addr + (target_phys_addr_t)size;
2324 49e9fba2 blueswir1
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2325 db7b5426 blueswir1
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2326 db7b5426 blueswir1
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2327 c227f099 Anthony Liguori
            ram_addr_t orig_memory = p->phys_offset;
2328 c227f099 Anthony Liguori
            target_phys_addr_t start_addr2, end_addr2;
2329 db7b5426 blueswir1
            int need_subpage = 0;
2330 db7b5426 blueswir1
2331 db7b5426 blueswir1
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2332 db7b5426 blueswir1
                          need_subpage);
2333 4254fab8 blueswir1
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2334 db7b5426 blueswir1
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2335 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2336 8da3ff18 pbrook
                                           &p->phys_offset, orig_memory,
2337 8da3ff18 pbrook
                                           p->region_offset);
2338 db7b5426 blueswir1
                } else {
2339 db7b5426 blueswir1
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2340 db7b5426 blueswir1
                                            >> IO_MEM_SHIFT];
2341 db7b5426 blueswir1
                }
2342 8da3ff18 pbrook
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2343 8da3ff18 pbrook
                                 region_offset);
2344 8da3ff18 pbrook
                p->region_offset = 0;
2345 db7b5426 blueswir1
            } else {
2346 db7b5426 blueswir1
                p->phys_offset = phys_offset;
2347 db7b5426 blueswir1
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2348 db7b5426 blueswir1
                    (phys_offset & IO_MEM_ROMD))
2349 db7b5426 blueswir1
                    phys_offset += TARGET_PAGE_SIZE;
2350 db7b5426 blueswir1
            }
2351 db7b5426 blueswir1
        } else {
2352 db7b5426 blueswir1
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2353 db7b5426 blueswir1
            p->phys_offset = phys_offset;
2354 8da3ff18 pbrook
            p->region_offset = region_offset;
2355 db7b5426 blueswir1
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2356 8da3ff18 pbrook
                (phys_offset & IO_MEM_ROMD)) {
2357 db7b5426 blueswir1
                phys_offset += TARGET_PAGE_SIZE;
2358 0e8f0967 pbrook
            } else {
2359 c227f099 Anthony Liguori
                target_phys_addr_t start_addr2, end_addr2;
2360 db7b5426 blueswir1
                int need_subpage = 0;
2361 db7b5426 blueswir1
2362 db7b5426 blueswir1
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2363 db7b5426 blueswir1
                              end_addr2, need_subpage);
2364 db7b5426 blueswir1
2365 4254fab8 blueswir1
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2366 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2367 8da3ff18 pbrook
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2368 67c4d23c pbrook
                                           addr & TARGET_PAGE_MASK);
2369 db7b5426 blueswir1
                    subpage_register(subpage, start_addr2, end_addr2,
2370 8da3ff18 pbrook
                                     phys_offset, region_offset);
2371 8da3ff18 pbrook
                    p->region_offset = 0;
2372 db7b5426 blueswir1
                }
2373 db7b5426 blueswir1
            }
2374 db7b5426 blueswir1
        }
2375 8da3ff18 pbrook
        region_offset += TARGET_PAGE_SIZE;
2376 33417e70 bellard
    }
2377 3b46e624 ths
2378 9d42037b bellard
    /* since each CPU stores ram addresses in its TLB cache, we must
2379 9d42037b bellard
       reset the modified entries */
2380 9d42037b bellard
    /* XXX: slow ! */
2381 9d42037b bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2382 9d42037b bellard
        tlb_flush(env, 1);
2383 9d42037b bellard
    }
2384 33417e70 bellard
}
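/* Illustrative sketch (editor's example): with region_offset == 0 the I/O
   callbacks receive the offset from the start of the registered range, e.g.

       int io = cpu_register_io_memory(mydev_read, mydev_write, s);
       cpu_register_physical_memory_offset(0x10000000, 0x1000, io, 0);

   so a guest access at 0x10000004 reaches the mydev callbacks with
   addr == 4.  mydev_read/mydev_write, s and the guest address are
   hypothetical; most callers use the shorter cpu_register_physical_memory()
   wrapper (declared elsewhere), which passes region_offset == 0. */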
2385 33417e70 bellard
2386 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2387 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2388 ba863458 bellard
{
2389 ba863458 bellard
    PhysPageDesc *p;
2390 ba863458 bellard
2391 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2392 ba863458 bellard
    if (!p)
2393 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2394 ba863458 bellard
    return p->phys_offset;
2395 ba863458 bellard
}
2396 ba863458 bellard
2397 c227f099 Anthony Liguori
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2398 f65ed4c1 aliguori
{
2399 f65ed4c1 aliguori
    if (kvm_enabled())
2400 f65ed4c1 aliguori
        kvm_coalesce_mmio_region(addr, size);
2401 f65ed4c1 aliguori
}
2402 f65ed4c1 aliguori
2403 c227f099 Anthony Liguori
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2404 f65ed4c1 aliguori
{
2405 f65ed4c1 aliguori
    if (kvm_enabled())
2406 f65ed4c1 aliguori
        kvm_uncoalesce_mmio_region(addr, size);
2407 f65ed4c1 aliguori
}
2408 f65ed4c1 aliguori
2409 c227f099 Anthony Liguori
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2410 94a6b54f pbrook
{
2411 94a6b54f pbrook
    RAMBlock *new_block;
2412 94a6b54f pbrook
2413 94a6b54f pbrook
    size = TARGET_PAGE_ALIGN(size);
2414 94a6b54f pbrook
    new_block = qemu_malloc(sizeof(*new_block));
2415 94a6b54f pbrook
2416 6b02494d Alexander Graf
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2417 6b02494d Alexander Graf
    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2418 6b02494d Alexander Graf
    new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2419 6b02494d Alexander Graf
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2420 6b02494d Alexander Graf
#else
2421 94a6b54f pbrook
    new_block->host = qemu_vmalloc(size);
2422 6b02494d Alexander Graf
#endif
2423 ccb167e9 Izik Eidus
#ifdef MADV_MERGEABLE
2424 ccb167e9 Izik Eidus
    madvise(new_block->host, size, MADV_MERGEABLE);
2425 ccb167e9 Izik Eidus
#endif
2426 94a6b54f pbrook
    new_block->offset = last_ram_offset;
2427 94a6b54f pbrook
    new_block->length = size;
2428 94a6b54f pbrook
2429 94a6b54f pbrook
    new_block->next = ram_blocks;
2430 94a6b54f pbrook
    ram_blocks = new_block;
2431 94a6b54f pbrook
2432 94a6b54f pbrook
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2433 94a6b54f pbrook
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2434 94a6b54f pbrook
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2435 94a6b54f pbrook
           0xff, size >> TARGET_PAGE_BITS);
2436 94a6b54f pbrook
2437 94a6b54f pbrook
    last_ram_offset += size;
2438 94a6b54f pbrook
2439 6f0437e8 Jan Kiszka
    if (kvm_enabled())
2440 6f0437e8 Jan Kiszka
        kvm_setup_guest_memory(new_block->host, size);
2441 6f0437e8 Jan Kiszka
2442 94a6b54f pbrook
    return new_block->offset;
2443 94a6b54f pbrook
}
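/* Illustrative sketch (editor's example): board code pairs this allocator
   with the registration call above, e.g.

       ram_addr_t ram_off = qemu_ram_alloc(ram_size);   /* ram_size is hypothetical */
       cpu_register_physical_memory(0x00000000, ram_size, ram_off | IO_MEM_RAM);

   The returned value is an offset into the guest RAM space, not a host
   pointer; qemu_get_ram_ptr() below converts it when a host view of the
   block is needed. */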
2444 e9a1ab19 bellard
2445 c227f099 Anthony Liguori
void qemu_ram_free(ram_addr_t addr)
2446 e9a1ab19 bellard
{
2447 94a6b54f pbrook
    /* TODO: implement this.  */
2448 e9a1ab19 bellard
}
2449 e9a1ab19 bellard
2450 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2451 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2452 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
2453 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
2454 5579c7f3 pbrook

2455 5579c7f3 pbrook
   It should not be used for general purpose DMA.
2456 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2457 5579c7f3 pbrook
 */
2458 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
2459 dc828ca1 pbrook
{
2460 94a6b54f pbrook
    RAMBlock *prev;
2461 94a6b54f pbrook
    RAMBlock **prevp;
2462 94a6b54f pbrook
    RAMBlock *block;
2463 94a6b54f pbrook
2464 94a6b54f pbrook
    prev = NULL;
2465 94a6b54f pbrook
    prevp = &ram_blocks;
2466 94a6b54f pbrook
    block = ram_blocks;
2467 94a6b54f pbrook
    while (block && (block->offset > addr
2468 94a6b54f pbrook
                     || block->offset + block->length <= addr)) {
2469 94a6b54f pbrook
        if (prev)
2470 94a6b54f pbrook
          prevp = &prev->next;
2471 94a6b54f pbrook
        prev = block;
2472 94a6b54f pbrook
        block = block->next;
2473 94a6b54f pbrook
    }
2474 94a6b54f pbrook
    if (!block) {
2475 94a6b54f pbrook
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2476 94a6b54f pbrook
        abort();
2477 94a6b54f pbrook
    }
2478 94a6b54f pbrook
    /* Move this entry to the start of the list.  */
2479 94a6b54f pbrook
    if (prev) {
2480 94a6b54f pbrook
        prev->next = block->next;
2481 94a6b54f pbrook
        block->next = *prevp;
2482 94a6b54f pbrook
        *prevp = block;
2483 94a6b54f pbrook
    }
2484 94a6b54f pbrook
    return block->host + (addr - block->offset);
2485 dc828ca1 pbrook
}
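/* Illustrative sketch (editor's example): the lookup above also moves the
   matching RAMBlock to the head of the list, so repeated accesses to the
   same block (the common case, e.g. a framebuffer) hit on the first loop
   iteration:

       uint8_t *fb = qemu_get_ram_ptr(vram_off);   /* vram_off from qemu_ram_alloc() */
       memset(fb, 0, TARGET_PAGE_SIZE);            /* device-local access only */

   As the comment above notes, such pointers must not be used for general
   purpose DMA into guest memory. */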
2486 dc828ca1 pbrook
2487 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
2488 5579c7f3 pbrook
   (typically a TLB entry) back to a ram offset.  */
2489 c227f099 Anthony Liguori
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2490 5579c7f3 pbrook
{
2491 94a6b54f pbrook
    RAMBlock *prev;
2492 94a6b54f pbrook
    RAMBlock **prevp;
2493 94a6b54f pbrook
    RAMBlock *block;
2494 94a6b54f pbrook
    uint8_t *host = ptr;
2495 94a6b54f pbrook
2496 94a6b54f pbrook
    prev = NULL;
2497 94a6b54f pbrook
    prevp = &ram_blocks;
2498 94a6b54f pbrook
    block = ram_blocks;
2499 94a6b54f pbrook
    while (block && (block->host > host
2500 94a6b54f pbrook
                     || block->host + block->length <= host)) {
2501 94a6b54f pbrook
        if (prev)
2502 94a6b54f pbrook
          prevp = &prev->next;
2503 94a6b54f pbrook
        prev = block;
2504 94a6b54f pbrook
        block = block->next;
2505 94a6b54f pbrook
    }
2506 94a6b54f pbrook
    if (!block) {
2507 94a6b54f pbrook
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2508 94a6b54f pbrook
        abort();
2509 94a6b54f pbrook
    }
2510 94a6b54f pbrook
    return block->offset + (host - block->host);
2511 5579c7f3 pbrook
}
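/* Illustrative sketch (editor's example): this is the inverse of
   qemu_get_ram_ptr(), so for any valid RAM offset `off` (hypothetical)

       void *host = qemu_get_ram_ptr(off);
       assert(qemu_ram_addr_from_host(host) == off);

   Passing a pointer that does not fall inside a registered RAMBlock aborts,
   as the check above shows. */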
2512 5579c7f3 pbrook
2513 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2514 33417e70 bellard
{
2515 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2516 ab3d1727 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2517 67d3b957 pbrook
#endif
2518 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2519 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 1);
2520 e18231a3 blueswir1
#endif
2521 e18231a3 blueswir1
    return 0;
2522 e18231a3 blueswir1
}
2523 e18231a3 blueswir1
2524 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2525 e18231a3 blueswir1
{
2526 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2527 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2528 e18231a3 blueswir1
#endif
2529 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2530 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 2);
2531 e18231a3 blueswir1
#endif
2532 e18231a3 blueswir1
    return 0;
2533 e18231a3 blueswir1
}
2534 e18231a3 blueswir1
2535 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2536 e18231a3 blueswir1
{
2537 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2538 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2539 e18231a3 blueswir1
#endif
2540 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2541 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 4);
2542 b4f0a316 blueswir1
#endif
2543 33417e70 bellard
    return 0;
2544 33417e70 bellard
}
2545 33417e70 bellard
2546 c227f099 Anthony Liguori
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2547 33417e70 bellard
{
2548 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2549 ab3d1727 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2550 67d3b957 pbrook
#endif
2551 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2552 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 1);
2553 e18231a3 blueswir1
#endif
2554 e18231a3 blueswir1
}
2555 e18231a3 blueswir1
2556 c227f099 Anthony Liguori
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2557 e18231a3 blueswir1
{
2558 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2559 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2560 e18231a3 blueswir1
#endif
2561 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2562 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 2);
2563 e18231a3 blueswir1
#endif
2564 e18231a3 blueswir1
}
2565 e18231a3 blueswir1
2566 c227f099 Anthony Liguori
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2567 e18231a3 blueswir1
{
2568 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2569 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2570 e18231a3 blueswir1
#endif
2571 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2572 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 4);
2573 b4f0a316 blueswir1
#endif
2574 33417e70 bellard
}
2575 33417e70 bellard
2576 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2577 33417e70 bellard
    unassigned_mem_readb,
2578 e18231a3 blueswir1
    unassigned_mem_readw,
2579 e18231a3 blueswir1
    unassigned_mem_readl,
2580 33417e70 bellard
};
2581 33417e70 bellard
2582 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2583 33417e70 bellard
    unassigned_mem_writeb,
2584 e18231a3 blueswir1
    unassigned_mem_writew,
2585 e18231a3 blueswir1
    unassigned_mem_writel,
2586 33417e70 bellard
};
2587 33417e70 bellard
2588 c227f099 Anthony Liguori
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2589 0f459d16 pbrook
                                uint32_t val)
2590 9fa3e853 bellard
{
2591 3a7d929e bellard
    int dirty_flags;
2592 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2593 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2594 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2595 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 1);
2596 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2597 9fa3e853 bellard
#endif
2598 3a7d929e bellard
    }
2599 5579c7f3 pbrook
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2600 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2601 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2602 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2603 f23db169 bellard
       flushed */
2604 f23db169 bellard
    if (dirty_flags == 0xff)
2605 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2606 9fa3e853 bellard
}
2607 9fa3e853 bellard
2608 c227f099 Anthony Liguori
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2609 0f459d16 pbrook
                                uint32_t val)
2610 9fa3e853 bellard
{
2611 3a7d929e bellard
    int dirty_flags;
2612 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2613 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2614 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2615 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 2);
2616 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2617 9fa3e853 bellard
#endif
2618 3a7d929e bellard
    }
2619 5579c7f3 pbrook
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2620 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2621 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2622 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2623 f23db169 bellard
       flushed */
2624 f23db169 bellard
    if (dirty_flags == 0xff)
2625 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2626 9fa3e853 bellard
}
2627 9fa3e853 bellard
2628 c227f099 Anthony Liguori
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2629 0f459d16 pbrook
                                uint32_t val)
2630 9fa3e853 bellard
{
2631 3a7d929e bellard
    int dirty_flags;
2632 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2633 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2634 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2635 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
2636 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2637 9fa3e853 bellard
#endif
2638 3a7d929e bellard
    }
2639 5579c7f3 pbrook
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2640 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2641 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2642 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2643 f23db169 bellard
       flushed */
2644 f23db169 bellard
    if (dirty_flags == 0xff)
2645 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2646 9fa3e853 bellard
}
2647 9fa3e853 bellard
2648 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const error_mem_read[3] = {
2649 9fa3e853 bellard
    NULL, /* never used */
2650 9fa3e853 bellard
    NULL, /* never used */
2651 9fa3e853 bellard
    NULL, /* never used */
2652 9fa3e853 bellard
};
2653 9fa3e853 bellard
2654 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2655 1ccde1cb bellard
    notdirty_mem_writeb,
2656 1ccde1cb bellard
    notdirty_mem_writew,
2657 1ccde1cb bellard
    notdirty_mem_writel,
2658 1ccde1cb bellard
};
2659 1ccde1cb bellard
2660 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
2661 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
2662 0f459d16 pbrook
{
2663 0f459d16 pbrook
    CPUState *env = cpu_single_env;
2664 06d55cc1 aliguori
    target_ulong pc, cs_base;
2665 06d55cc1 aliguori
    TranslationBlock *tb;
2666 0f459d16 pbrook
    target_ulong vaddr;
2667 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2668 06d55cc1 aliguori
    int cpu_flags;
2669 0f459d16 pbrook
2670 06d55cc1 aliguori
    if (env->watchpoint_hit) {
2671 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
2672 06d55cc1 aliguori
         * the debug interrupt so that it will trigger after the
2673 06d55cc1 aliguori
         * current instruction. */
2674 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2675 06d55cc1 aliguori
        return;
2676 06d55cc1 aliguori
    }
2677 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2678 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2679 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
2680 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2681 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
2682 6e140f28 aliguori
            if (!env->watchpoint_hit) {
2683 6e140f28 aliguori
                env->watchpoint_hit = wp;
2684 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
2685 6e140f28 aliguori
                if (!tb) {
2686 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2687 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
2688 6e140f28 aliguori
                }
2689 6e140f28 aliguori
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2690 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
2691 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2692 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
2693 6e140f28 aliguori
                } else {
2694 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2695 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2696 6e140f28 aliguori
                }
2697 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
2698 06d55cc1 aliguori
            }
2699 6e140f28 aliguori
        } else {
2700 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
2701 0f459d16 pbrook
        }
2702 0f459d16 pbrook
    }
2703 0f459d16 pbrook
}
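/* Illustrative note (editor's example): len_mask encodes the access size as
   ~(size - 1), so the wrappers below pass

       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);   /* 1-byte read  */
       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);  /* 4-byte write */

   and the comparison against wp->vaddr and wp->len_mask above then flags a
   watchpoint as hit, roughly, whenever the access overlaps the watched
   range and the access type matches wp->flags. */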
2704 0f459d16 pbrook
2705 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2706 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
2707 6658ffb8 pbrook
   phys routines.  */
2708 c227f099 Anthony Liguori
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2709 6658ffb8 pbrook
{
2710 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2711 6658ffb8 pbrook
    return ldub_phys(addr);
2712 6658ffb8 pbrook
}
2713 6658ffb8 pbrook
2714 c227f099 Anthony Liguori
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2715 6658ffb8 pbrook
{
2716 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2717 6658ffb8 pbrook
    return lduw_phys(addr);
2718 6658ffb8 pbrook
}
2719 6658ffb8 pbrook
2720 c227f099 Anthony Liguori
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2721 6658ffb8 pbrook
{
2722 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2723 6658ffb8 pbrook
    return ldl_phys(addr);
2724 6658ffb8 pbrook
}
2725 6658ffb8 pbrook
2726 c227f099 Anthony Liguori
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2727 6658ffb8 pbrook
                             uint32_t val)
2728 6658ffb8 pbrook
{
2729 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2730 6658ffb8 pbrook
    stb_phys(addr, val);
2731 6658ffb8 pbrook
}
2732 6658ffb8 pbrook
2733 c227f099 Anthony Liguori
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2734 6658ffb8 pbrook
                             uint32_t val)
2735 6658ffb8 pbrook
{
2736 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2737 6658ffb8 pbrook
    stw_phys(addr, val);
2738 6658ffb8 pbrook
}
2739 6658ffb8 pbrook
2740 c227f099 Anthony Liguori
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2741 6658ffb8 pbrook
                             uint32_t val)
2742 6658ffb8 pbrook
{
2743 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2744 6658ffb8 pbrook
    stl_phys(addr, val);
2745 6658ffb8 pbrook
}
2746 6658ffb8 pbrook
2747 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const watch_mem_read[3] = {
2748 6658ffb8 pbrook
    watch_mem_readb,
2749 6658ffb8 pbrook
    watch_mem_readw,
2750 6658ffb8 pbrook
    watch_mem_readl,
2751 6658ffb8 pbrook
};
2752 6658ffb8 pbrook
2753 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2754 6658ffb8 pbrook
    watch_mem_writeb,
2755 6658ffb8 pbrook
    watch_mem_writew,
2756 6658ffb8 pbrook
    watch_mem_writel,
2757 6658ffb8 pbrook
};
2758 6658ffb8 pbrook
2759 c227f099 Anthony Liguori
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2760 db7b5426 blueswir1
                                 unsigned int len)
2761 db7b5426 blueswir1
{
2762 db7b5426 blueswir1
    uint32_t ret;
2763 db7b5426 blueswir1
    unsigned int idx;
2764 db7b5426 blueswir1
2765 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2766 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2767 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2768 db7b5426 blueswir1
           mmio, len, addr, idx);
2769 db7b5426 blueswir1
#endif
2770 8da3ff18 pbrook
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2771 8da3ff18 pbrook
                                       addr + mmio->region_offset[idx][0][len]);
2772 db7b5426 blueswir1
2773 db7b5426 blueswir1
    return ret;
2774 db7b5426 blueswir1
}
2775 db7b5426 blueswir1
2776 c227f099 Anthony Liguori
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2777 db7b5426 blueswir1
                              uint32_t value, unsigned int len)
2778 db7b5426 blueswir1
{
2779 db7b5426 blueswir1
    unsigned int idx;
2780 db7b5426 blueswir1
2781 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2782 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2783 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2784 db7b5426 blueswir1
           mmio, len, addr, idx, value);
2785 db7b5426 blueswir1
#endif
2786 8da3ff18 pbrook
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2787 8da3ff18 pbrook
                                  addr + mmio->region_offset[idx][1][len],
2788 8da3ff18 pbrook
                                  value);
2789 db7b5426 blueswir1
}
2790 db7b5426 blueswir1
2791 c227f099 Anthony Liguori
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2792 db7b5426 blueswir1
{
2793 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2794 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2795 db7b5426 blueswir1
#endif
2796 db7b5426 blueswir1
2797 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 0);
2798 db7b5426 blueswir1
}
2799 db7b5426 blueswir1
2800 c227f099 Anthony Liguori
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2801 db7b5426 blueswir1
                            uint32_t value)
2802 db7b5426 blueswir1
{
2803 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2804 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2805 db7b5426 blueswir1
#endif
2806 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
2807 db7b5426 blueswir1
}
2808 db7b5426 blueswir1
2809 c227f099 Anthony Liguori
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2810 db7b5426 blueswir1
{
2811 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2812 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2813 db7b5426 blueswir1
#endif
2814 db7b5426 blueswir1
2815 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 1);
2816 db7b5426 blueswir1
}
2817 db7b5426 blueswir1
2818 c227f099 Anthony Liguori
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2819 db7b5426 blueswir1
                            uint32_t value)
2820 db7b5426 blueswir1
{
2821 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2822 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2823 db7b5426 blueswir1
#endif
2824 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
2825 db7b5426 blueswir1
}
2826 db7b5426 blueswir1
2827 c227f099 Anthony Liguori
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2828 db7b5426 blueswir1
{
2829 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2830 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2831 db7b5426 blueswir1
#endif
2832 db7b5426 blueswir1
2833 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 2);
2834 db7b5426 blueswir1
}
2835 db7b5426 blueswir1
2836 db7b5426 blueswir1
static void subpage_writel (void *opaque,
2837 c227f099 Anthony Liguori
                         target_phys_addr_t addr, uint32_t value)
2838 db7b5426 blueswir1
{
2839 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2840 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2841 db7b5426 blueswir1
#endif
2842 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
2843 db7b5426 blueswir1
}
2844 db7b5426 blueswir1
2845 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const subpage_read[] = {
2846 db7b5426 blueswir1
    &subpage_readb,
2847 db7b5426 blueswir1
    &subpage_readw,
2848 db7b5426 blueswir1
    &subpage_readl,
2849 db7b5426 blueswir1
};
2850 db7b5426 blueswir1
2851 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const subpage_write[] = {
2852 db7b5426 blueswir1
    &subpage_writeb,
2853 db7b5426 blueswir1
    &subpage_writew,
2854 db7b5426 blueswir1
    &subpage_writel,
2855 db7b5426 blueswir1
};
2856 db7b5426 blueswir1
2857 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2858 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset)
2859 db7b5426 blueswir1
{
2860 db7b5426 blueswir1
    int idx, eidx;
2861 4254fab8 blueswir1
    unsigned int i;
2862 db7b5426 blueswir1
2863 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2864 db7b5426 blueswir1
        return -1;
2865 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
2866 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
2867 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2868 0bf9e31a Blue Swirl
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2869 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
2870 db7b5426 blueswir1
#endif
2871 db7b5426 blueswir1
    memory >>= IO_MEM_SHIFT;
2872 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
2873 4254fab8 blueswir1
        for (i = 0; i < 4; i++) {
2874 3ee89922 blueswir1
            if (io_mem_read[memory][i]) {
2875 3ee89922 blueswir1
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2876 3ee89922 blueswir1
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2877 8da3ff18 pbrook
                mmio->region_offset[idx][0][i] = region_offset;
2878 3ee89922 blueswir1
            }
2879 3ee89922 blueswir1
            if (io_mem_write[memory][i]) {
2880 3ee89922 blueswir1
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2881 3ee89922 blueswir1
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2882 8da3ff18 pbrook
                mmio->region_offset[idx][1][i] = region_offset;
2883 3ee89922 blueswir1
            }
2884 4254fab8 blueswir1
        }
2885 db7b5426 blueswir1
    }
2886 db7b5426 blueswir1
2887 db7b5426 blueswir1
    return 0;
2888 db7b5426 blueswir1
}
2889 db7b5426 blueswir1
2890 c227f099 Anthony Liguori
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2891 c227f099 Anthony Liguori
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2892 db7b5426 blueswir1
{
2893 c227f099 Anthony Liguori
    subpage_t *mmio;
2894 db7b5426 blueswir1
    int subpage_memory;
2895 db7b5426 blueswir1
2896 c227f099 Anthony Liguori
    mmio = qemu_mallocz(sizeof(subpage_t));
2897 1eec614b aliguori
2898 1eec614b aliguori
    mmio->base = base;
2899 1eed09cb Avi Kivity
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2900 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2901 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2902 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2903 db7b5426 blueswir1
#endif
2904 1eec614b aliguori
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2905 1eec614b aliguori
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2906 8da3ff18 pbrook
                         region_offset);
2907 db7b5426 blueswir1
2908 db7b5426 blueswir1
    return mmio;
2909 db7b5426 blueswir1
}
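/* Illustrative sketch (editor's example): subpages are created automatically
   by cpu_register_physical_memory_offset() whenever a registration does not
   cover a whole target page, e.g.

       int io = cpu_register_io_memory(mydev_read, mydev_write, s);
       cpu_register_physical_memory(0x10000800, 0x100, io);   /* 256-byte region */

   The surrounding page is then backed by an IO_MEM_SUBPAGE entry whose
   per-offset tables (filled in by subpage_register() above) dispatch each
   access either to the new handlers or to the previous owner of the page.
   mydev_read/mydev_write, s and the guest address are hypothetical. */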
2910 db7b5426 blueswir1
2911 88715657 aliguori
static int get_free_io_mem_idx(void)
2912 88715657 aliguori
{
2913 88715657 aliguori
    int i;
2914 88715657 aliguori
2915 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2916 88715657 aliguori
        if (!io_mem_used[i]) {
2917 88715657 aliguori
            io_mem_used[i] = 1;
2918 88715657 aliguori
            return i;
2919 88715657 aliguori
        }
2920 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
2921 88715657 aliguori
    return -1;
2922 88715657 aliguori
}
2923 88715657 aliguori
2924 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
2925 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
2926 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
2927 3ee89922 blueswir1
   If io_index is non-zero, the corresponding io zone is
2928 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
2929 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
2930 4254fab8 blueswir1
   returned on error. */
2931 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
2932 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
2933 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
2934 1eed09cb Avi Kivity
                                        void *opaque)
2935 33417e70 bellard
{
2936 4254fab8 blueswir1
    int i, subwidth = 0;
2937 33417e70 bellard
2938 33417e70 bellard
    if (io_index <= 0) {
2939 88715657 aliguori
        io_index = get_free_io_mem_idx();
2940 88715657 aliguori
        if (io_index == -1)
2941 88715657 aliguori
            return io_index;
2942 33417e70 bellard
    } else {
2943 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
2944 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
2945 33417e70 bellard
            return -1;
2946 33417e70 bellard
    }
2947 b5ff1b31 bellard
2948 33417e70 bellard
    for(i = 0;i < 3; i++) {
2949 4254fab8 blueswir1
        if (!mem_read[i] || !mem_write[i])
2950 4254fab8 blueswir1
            subwidth = IO_MEM_SUBWIDTH;
2951 33417e70 bellard
        io_mem_read[io_index][i] = mem_read[i];
2952 33417e70 bellard
        io_mem_write[io_index][i] = mem_write[i];
2953 33417e70 bellard
    }
2954 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
2955 4254fab8 blueswir1
    return (io_index << IO_MEM_SHIFT) | subwidth;
2956 33417e70 bellard
}
2957 61382a50 bellard
2958 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2959 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
2960 1eed09cb Avi Kivity
                           void *opaque)
2961 1eed09cb Avi Kivity
{
2962 1eed09cb Avi Kivity
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2963 1eed09cb Avi Kivity
}
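/* Illustrative sketch (editor's example): a minimal MMIO user of this API
   supplies the byte/word/dword callbacks (unused widths may be NULL, which
   marks the region IO_MEM_SUBWIDTH and forces the subpage path), e.g.

       static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
       {
           return 0x12345678;   /* hypothetical register value */
       }
       static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
       {
           /* hypothetical register write */
       }
       static CPUReadMemoryFunc * const mydev_read[3] = {
           NULL, NULL, mydev_readl,
       };
       static CPUWriteMemoryFunc * const mydev_write[3] = {
           NULL, NULL, mydev_writel,
       };

       int io = cpu_register_io_memory(mydev_read, mydev_write, s);
       cpu_register_physical_memory(0x10000000, 0x1000, io);

   s and the guest address are hypothetical; cpu_unregister_io_memory(io)
   below releases the table slot again. */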
2964 1eed09cb Avi Kivity
2965 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
2966 88715657 aliguori
{
2967 88715657 aliguori
    int i;
2968 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
2969 88715657 aliguori
2970 88715657 aliguori
    for (i=0;i < 3; i++) {
2971 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2972 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2973 88715657 aliguori
    }
2974 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
2975 88715657 aliguori
    io_mem_used[io_index] = 0;
2976 88715657 aliguori
}
2977 88715657 aliguori
2978 e9179ce1 Avi Kivity
static void io_mem_init(void)
2979 e9179ce1 Avi Kivity
{
2980 e9179ce1 Avi Kivity
    int i;
2981 e9179ce1 Avi Kivity
2982 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
2983 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
2984 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
2985 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
2986 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
2987 e9179ce1 Avi Kivity
2988 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
2989 e9179ce1 Avi Kivity
                                          watch_mem_write, NULL);
2990 e9179ce1 Avi Kivity
}
2991 e9179ce1 Avi Kivity
2992 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
2993 e2eef170 pbrook
2994 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
2995 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
2996 c227f099 Anthony Liguori
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2997 13eb76e0 bellard
                            int len, int is_write)
2998 13eb76e0 bellard
{
2999 13eb76e0 bellard
    int l, flags;
3000 13eb76e0 bellard
    target_ulong page;
3001 53a5960a pbrook
    void * p;
3002 13eb76e0 bellard
3003 13eb76e0 bellard
    while (len > 0) {
3004 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3005 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3006 13eb76e0 bellard
        if (l > len)
3007 13eb76e0 bellard
            l = len;
3008 13eb76e0 bellard
        flags = page_get_flags(page);
3009 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
3010 13eb76e0 bellard
            return;
3011 13eb76e0 bellard
        if (is_write) {
3012 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3013 13eb76e0 bellard
                return;
3014 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3015 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3016 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
3017 579a97f7 bellard
                return;
3018 72fb7daa aurel32
            memcpy(p, buf, l);
3019 72fb7daa aurel32
            unlock_user(p, addr, l);
3020 13eb76e0 bellard
        } else {
3021 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3022 13eb76e0 bellard
                return;
3023 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3024 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3025 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
3026 579a97f7 bellard
                return;
3027 72fb7daa aurel32
            memcpy(buf, p, l);
3028 5b257578 aurel32
            unlock_user(p, addr, 0);
3029 13eb76e0 bellard
        }
3030 13eb76e0 bellard
        len -= l;
3031 13eb76e0 bellard
        buf += l;
3032 13eb76e0 bellard
        addr += l;
3033 13eb76e0 bellard
    }
3034 13eb76e0 bellard
}
3035 8df1cd07 bellard
3036 13eb76e0 bellard
#else
3037 c227f099 Anthony Liguori
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3038 13eb76e0 bellard
                            int len, int is_write)
3039 13eb76e0 bellard
{
3040 13eb76e0 bellard
    int l, io_index;
3041 13eb76e0 bellard
    uint8_t *ptr;
3042 13eb76e0 bellard
    uint32_t val;
3043 c227f099 Anthony Liguori
    target_phys_addr_t page;
3044 2e12669a bellard
    unsigned long pd;
3045 92e873b9 bellard
    PhysPageDesc *p;
3046 3b46e624 ths
3047 13eb76e0 bellard
    while (len > 0) {
3048 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3049 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3050 13eb76e0 bellard
        if (l > len)
3051 13eb76e0 bellard
            l = len;
3052 92e873b9 bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3053 13eb76e0 bellard
        if (!p) {
3054 13eb76e0 bellard
            pd = IO_MEM_UNASSIGNED;
3055 13eb76e0 bellard
        } else {
3056 13eb76e0 bellard
            pd = p->phys_offset;
3057 13eb76e0 bellard
        }
3058 3b46e624 ths
3059 13eb76e0 bellard
        if (is_write) {
3060 3a7d929e bellard
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3061 c227f099 Anthony Liguori
                target_phys_addr_t addr1 = addr;
3062 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3063 8da3ff18 pbrook
                if (p)
3064 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3065 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
3066 6a00d601 bellard
                   potential bugs */
3067 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3068 1c213d19 bellard
                    /* 32 bit write access */
3069 c27004ec bellard
                    val = ldl_p(buf);
3070 6c2934db aurel32
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3071 13eb76e0 bellard
                    l = 4;
3072 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3073 1c213d19 bellard
                    /* 16 bit write access */
3074 c27004ec bellard
                    val = lduw_p(buf);
3075 6c2934db aurel32
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3076 13eb76e0 bellard
                    l = 2;
3077 13eb76e0 bellard
                } else {
3078 1c213d19 bellard
                    /* 8 bit write access */
3079 c27004ec bellard
                    val = ldub_p(buf);
3080 6c2934db aurel32
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3081 13eb76e0 bellard
                    l = 1;
3082 13eb76e0 bellard
                }
3083 13eb76e0 bellard
            } else {
3084 b448f2f3 bellard
                unsigned long addr1;
3085 b448f2f3 bellard
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3086 13eb76e0 bellard
                /* RAM case */
3087 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
3088 13eb76e0 bellard
                memcpy(ptr, buf, l);
3089 3a7d929e bellard
                if (!cpu_physical_memory_is_dirty(addr1)) {
3090 3a7d929e bellard
                    /* invalidate code */
3091 3a7d929e bellard
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3092 3a7d929e bellard
                    /* set dirty bit */
3093 5fafdf24 ths
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3094 f23db169 bellard
                        (0xff & ~CODE_DIRTY_FLAG);
3095 3a7d929e bellard
                }
3096 13eb76e0 bellard
            }
3097 13eb76e0 bellard
        } else {
3098 5fafdf24 ths
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3099 2a4188a3 bellard
                !(pd & IO_MEM_ROMD)) {
3100 c227f099 Anthony Liguori
                target_phys_addr_t addr1 = addr;
3101 13eb76e0 bellard
                /* I/O case */
3102 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3103 8da3ff18 pbrook
                if (p)
3104 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3105 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3106 13eb76e0 bellard
                    /* 32 bit read access */
3107 6c2934db aurel32
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3108 c27004ec bellard
                    stl_p(buf, val);
3109 13eb76e0 bellard
                    l = 4;
3110 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3111 13eb76e0 bellard
                    /* 16 bit read access */
3112 6c2934db aurel32
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3113 c27004ec bellard
                    stw_p(buf, val);
3114 13eb76e0 bellard
                    l = 2;
3115 13eb76e0 bellard
                } else {
3116 1c213d19 bellard
                    /* 8 bit read access */
3117 6c2934db aurel32
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3118 c27004ec bellard
                    stb_p(buf, val);
3119 13eb76e0 bellard
                    l = 1;
3120 13eb76e0 bellard
                }
3121 13eb76e0 bellard
            } else {
3122 13eb76e0 bellard
                /* RAM case */
3123 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3124 13eb76e0 bellard
                    (addr & ~TARGET_PAGE_MASK);
3125 13eb76e0 bellard
                memcpy(buf, ptr, l);
3126 13eb76e0 bellard
            }
3127 13eb76e0 bellard
        }
3128 13eb76e0 bellard
        len -= l;
3129 13eb76e0 bellard
        buf += l;
3130 13eb76e0 bellard
        addr += l;
3131 13eb76e0 bellard
    }
3132 13eb76e0 bellard
}
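/* Illustrative sketch (editor's example): device emulation uses this as its
   generic DMA path, e.g.

       uint8_t sector[512];
       cpu_physical_memory_rw(dma_addr, sector, sizeof(sector), 0);  /* guest -> sector */
       cpu_physical_memory_rw(dma_addr, sector, sizeof(sector), 1);  /* sector -> guest */

   dma_addr is a hypothetical guest physical address.  As the code above
   shows, writes that land in RAM also invalidate any translated code on the
   touched pages and update the dirty bitmap. */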
3133 8df1cd07 bellard
3134 d0ecd2aa bellard
/* used for ROM loading: can write in RAM and ROM */
3135 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3136 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3137 d0ecd2aa bellard
{
3138 d0ecd2aa bellard
    int l;
3139 d0ecd2aa bellard
    uint8_t *ptr;
3140 c227f099 Anthony Liguori
    target_phys_addr_t page;
3141 d0ecd2aa bellard
    unsigned long pd;
3142 d0ecd2aa bellard
    PhysPageDesc *p;
3143 3b46e624 ths
3144 d0ecd2aa bellard
    while (len > 0) {
3145 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3146 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3147 d0ecd2aa bellard
        if (l > len)
3148 d0ecd2aa bellard
            l = len;
3149 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3150 d0ecd2aa bellard
        if (!p) {
3151 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3152 d0ecd2aa bellard
        } else {
3153 d0ecd2aa bellard
            pd = p->phys_offset;
3154 d0ecd2aa bellard
        }
3155 3b46e624 ths
3156 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3157 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3158 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3159 d0ecd2aa bellard
            /* do nothing */
3160 d0ecd2aa bellard
        } else {
3161 d0ecd2aa bellard
            unsigned long addr1;
3162 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3163 d0ecd2aa bellard
            /* ROM/RAM case */
3164 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3165 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3166 d0ecd2aa bellard
        }
3167 d0ecd2aa bellard
        len -= l;
3168 d0ecd2aa bellard
        buf += l;
3169 d0ecd2aa bellard
        addr += l;
3170 d0ecd2aa bellard
    }
3171 d0ecd2aa bellard
}
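/* Illustrative sketch (editor's example): firmware loaders use this variant
   because it writes through ROM protection, e.g.

       cpu_physical_memory_write_rom(0xfffc0000, bios_data, bios_size);

   The address, bios_data and bios_size are hypothetical.  A plain
   cpu_physical_memory_rw() write to the same range would be routed to the
   ROM/unassigned write handlers and leave the ROM contents untouched. */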
3172 d0ecd2aa bellard
3173 6d16c2f8 aliguori
typedef struct {
3174 6d16c2f8 aliguori
    void *buffer;
3175 c227f099 Anthony Liguori
    target_phys_addr_t addr;
3176 c227f099 Anthony Liguori
    target_phys_addr_t len;
3177 6d16c2f8 aliguori
} BounceBuffer;
3178 6d16c2f8 aliguori
3179 6d16c2f8 aliguori
static BounceBuffer bounce;
3180 6d16c2f8 aliguori
3181 ba223c29 aliguori
typedef struct MapClient {
3182 ba223c29 aliguori
    void *opaque;
3183 ba223c29 aliguori
    void (*callback)(void *opaque);
3184 72cf2d4f Blue Swirl
    QLIST_ENTRY(MapClient) link;
3185 ba223c29 aliguori
} MapClient;
3186 ba223c29 aliguori
3187 72cf2d4f Blue Swirl
static QLIST_HEAD(map_client_list, MapClient) map_client_list
3188 72cf2d4f Blue Swirl
    = QLIST_HEAD_INITIALIZER(map_client_list);
3189 ba223c29 aliguori
3190 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3191 ba223c29 aliguori
{
3192 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3193 ba223c29 aliguori
3194 ba223c29 aliguori
    client->opaque = opaque;
3195 ba223c29 aliguori
    client->callback = callback;
3196 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3197 ba223c29 aliguori
    return client;
3198 ba223c29 aliguori
}
3199 ba223c29 aliguori
3200 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3201 ba223c29 aliguori
{
3202 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3203 ba223c29 aliguori
3204 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3205 34d5e948 Isaku Yamahata
    qemu_free(client);
3206 ba223c29 aliguori
}
3207 ba223c29 aliguori
3208 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3209 ba223c29 aliguori
{
3210 ba223c29 aliguori
    MapClient *client;
3211 ba223c29 aliguori
3212 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3213 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3214 ba223c29 aliguori
        client->callback(client->opaque);
3215 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3216 ba223c29 aliguori
    }
3217 ba223c29 aliguori
}
3218 ba223c29 aliguori
3219 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

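/* Illustrative sketch, not part of the original file: a bounded read of guest
 * memory through the map/unmap pair above.  Note that *plen may come back
 * smaller than requested, and that access_len passed to unmap should reflect
 * how much was actually touched (here: everything that was mapped).  The
 * helper name copy_from_guest is hypothetical. */
#if 0
static int copy_from_guest(void *dst, target_phys_addr_t src,
                           target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *p = cpu_physical_memory_map(src, &plen, 0 /* read */);

    if (!p) {
        return -1;    /* caller may retry via cpu_register_map_client() */
    }
    memcpy(dst, p, plen);
    /* Not a write, so no pages are dirtied; access_len is still reported. */
    cpu_physical_memory_unmap(p, plen, 0 /* is_write */, plen);
    return plen == len ? 0 : -1;   /* partial mapping: only plen bytes copied */
}
#endif
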
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. This is useful when the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

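/* Illustrative sketch, not part of the original file: the kind of caller the
 * comment above has in mind.  A target MMU walk that sets an accessed/dirty
 * flag in a guest page-table entry uses stl_phys_notdirty() so that QEMU's own
 * bookkeeping store does not mark the page dirty and thereby defeat dirty-bit
 * based tracking of guest page-table modifications.  The names
 * mmu_set_accessed and PTE_ACCESSED are placeholders, not taken from any
 * particular target. */
#if 0
static void mmu_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        pte |= PTE_ACCESSED;
        /* A plain stl_phys() would also mark the page dirty and invalidate
           TBs; stl_phys_notdirty() deliberately skips that. */
        stl_phys_notdirty(pte_addr, pte);
    }
}
#endif
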
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

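/* Illustrative sketch, not part of the original file: the "addr must be
 * aligned" warnings above mean naturally aligned for the access size, e.g. a
 * 4-byte boundary for ldl_phys()/stl_phys(); unaligned accesses can go
 * through cpu_physical_memory_rw() instead.  The address 0x1000 is arbitrary. */
#if 0
static void aligned_access_example(void)
{
    uint32_t v;

    stl_phys(0x1000, 0x12345678);   /* ok: address is 4-byte aligned */
    v = ldl_phys(0x1000);           /* reads back 0x12345678 */
    /* stl_phys(0x1002, ...) would be an unaligned 32-bit store. */
    (void)v;
}
#endif
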
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

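/* Illustrative sketch, not part of the original file: cpu_memory_rw_debug()
 * is the routine a debugger front end (e.g. a gdb stub) would use, since it
 * walks the guest page tables via cpu_get_phys_page_debug() and can write
 * through to ROM.  The helper name debug_read_u32 is hypothetical. */
#if 0
static int debug_read_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) != 0) {
        return -1;              /* no mapping for this virtual address */
    }
    *out = ldl_p(buf);          /* interpret the bytes in target byte order */
    return 0;
}
#endif
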
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

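/* Illustrative note, not part of the original file: dump_exec_info() only
 * needs a FILE-like handle and an fprintf-compatible callback, so the
 * translation statistics can be dumped to stderr as simply as below (the
 * monitor's "info jit" command feeds it its own printf instead). */
#if 0
    dump_exec_info(stderr, fprintf);
#endif
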
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif