/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
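/* Both page tables are two-level: a page index is split into an
   L1_BITS-wide upper part that selects an l1_map / l1_phys_map slot and
   an L2_BITS-wide lower part that indexes the second-level array.  For
   example, assuming TARGET_PAGE_BITS == 12, L1_BITS is 32 - 10 - 12 = 10,
   so index >> L2_BITS picks the L1 slot and index & (L2_SIZE - 1) picks
   the descriptor inside it (see page_l1_map() and page_find_alloc()
   below). */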

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

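/* map_exec() makes a buffer executable so that generated host code can
   run from it: VirtualProtect() to PAGE_EXECUTE_READWRITE on Win32,
   otherwise the range is widened to host page boundaries and
   mprotect()ed to read/write/execute.  For example, with 4 KiB pages,
   addr = 0x400123 and size = 0x300 are rounded out to the range
   [0x400000, 0x401000). */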
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

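/* page_init() determines the host page size, derives
   qemu_host_page_size/bits/mask from it (never smaller than
   TARGET_PAGE_SIZE) and allocates the first level of the physical page
   map.  In user-mode emulation it also walks /proc/self/maps and marks
   every range the host process has already mapped as PAGE_RESERVED, so
   that later guest mappings can avoid them. */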
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

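/* The physical page map mirrors the virtual one but is allocated
   lazily, and grows an extra level above the (L1, L2) split when
   TARGET_PHYS_ADDR_SPACE_BITS > 32.  Freshly allocated leaves start as
   IO_MEM_UNASSIGNED with region_offset set to the page's own physical
   address; phys_offset later carries the page's memory offset with the
   io_index in the low bits, as described in the PhysPageDesc comment
   above. */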
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

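/* code_gen_alloc() obtains the translation buffer either from the
   static array above (USE_STATIC_CODE_GEN_BUFFER) or from
   mmap()/qemu_malloc().  The mmap() paths try to place the buffer where
   the TCG host back end can reach it with direct branches (MAP_32BIT on
   x86_64, fixed low addresses on sparc64 and ARM).
   code_gen_buffer_max_size then reserves room for one maximum-sized
   block, and code_gen_max_blocks is derived from
   CODE_GEN_AVG_BLOCK_SIZE. */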
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static void cpu_common_pre_save(const void *opaque)
{
    CPUState *env = (void *)opaque;

    cpu_synchronize_state(env);
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}

static int cpu_common_post_load(void *opaque)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = cpu_common_pre_save,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

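/* The per-page TB lists use tagged pointers: the low two bits of
   first_tb / page_next encode which of the TB's (at most two) pages the
   link belongs to, so pointers are masked with ~3 before being
   dereferenced (tb_alloc_page() below stores the tag). */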
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

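/* TBs that jump directly to a given TB are kept on a circular list
   threaded through jmp_next[], again with the slot number (0 or 1)
   tagged in the low pointer bits.  A tag of 2 marks the list head,
   jmp_first, which points back at the owning TB itself (see the
   "fail safe" reset in tb_phys_invalidate()). */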
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

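/* set_bits() sets 'len' consecutive bits starting at bit 'start' in the
   bitmap 'tab'.  For example, set_bits(tab, 5, 7) sets bits 5..11,
   i.e. bits 5-7 of tab[0] and bits 0-3 of tab[1]. */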
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

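/* The code bitmap holds one bit per byte of the target page
   (TARGET_PAGE_SIZE / 8 bytes in total) marking the bytes covered by
   translated code.  It is only built once a page has taken
   SMC_BITMAP_USE_THRESHOLD write faults (see
   tb_invalidate_phys_page_range()), after which
   tb_invalidate_phys_page_fast() can filter out writes that do not
   touch any translated code. */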
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

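/* Generate a new TB for the guest code starting at 'pc': the host code
   is emitted at code_gen_ptr, and if the translated code crosses a page
   boundary the second physical page is recorded as well before the TB
   is linked into the physical page tables with tb_link_phys(). */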
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

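/* Fast path for small writes: when the page has a code bitmap, only the
   'len' bits at the write offset are tested and the full
   tb_invalidate_phys_page_range() is called only if one of them is set;
   without a bitmap every write takes the slow path. */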
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

1111 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
1112 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1113 53a5960a pbrook
                                 unsigned int n, target_ulong page_addr)
1114 fd6ce8f6 bellard
{
1115 fd6ce8f6 bellard
    PageDesc *p;
1116 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1117 9fa3e853 bellard
1118 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1119 3a7d929e bellard
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1120 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1121 9fa3e853 bellard
    last_first_tb = p->first_tb;
1122 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1123 9fa3e853 bellard
    invalidate_page_bitmap(p);
1124 fd6ce8f6 bellard
1125 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1126 d720b93d bellard
1127 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1128 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1129 53a5960a pbrook
        target_ulong addr;
1130 53a5960a pbrook
        PageDesc *p2;
1131 9fa3e853 bellard
        int prot;
1132 9fa3e853 bellard
1133 fd6ce8f6 bellard
        /* force the host page to be non-writable (writes will have a
1134 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1135 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1136 fd6ce8f6 bellard
        prot = 0;
1137 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1138 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1139 53a5960a pbrook
1140 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1141 53a5960a pbrook
            if (!p2)
1142 53a5960a pbrook
                continue;
1143 53a5960a pbrook
            prot |= p2->flags;
1144 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1145 53a5960a pbrook
            page_get_flags(addr);
1146 53a5960a pbrook
          }
1147 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1148 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1149 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1150 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1151 53a5960a pbrook
               page_addr);
1152 fd6ce8f6 bellard
#endif
1153 fd6ce8f6 bellard
    }
1154 9fa3e853 bellard
#else
1155 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1156 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1157 9fa3e853 bellard
       allocated in a physical page */
1158 9fa3e853 bellard
    if (!last_first_tb) {
1159 6a00d601 bellard
        tlb_protect_code(page_addr);
1160 9fa3e853 bellard
    }
1161 9fa3e853 bellard
#endif
1162 d720b93d bellard
1163 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1164 fd6ce8f6 bellard
}
1165 fd6ce8f6 bellard
1166 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1167 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1168 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1169 fd6ce8f6 bellard
{
1170 fd6ce8f6 bellard
    TranslationBlock *tb;
1171 fd6ce8f6 bellard
1172 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1173 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1174 d4e8164f bellard
        return NULL;
1175 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1176 fd6ce8f6 bellard
    tb->pc = pc;
1177 b448f2f3 bellard
    tb->cflags = 0;
1178 d4e8164f bellard
    return tb;
1179 d4e8164f bellard
}
1180 d4e8164f bellard
1181 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
1182 2e70f6ef pbrook
{
1183 bf20dc07 ths
    /* In practice this is mostly used for single-use temporary TBs.
1184 2e70f6ef pbrook
       Ignore the hard cases and just back up if this TB happens to
1185 2e70f6ef pbrook
       be the last one generated.  */
1186 2e70f6ef pbrook
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1187 2e70f6ef pbrook
        code_gen_ptr = tb->tc_ptr;
1188 2e70f6ef pbrook
        nb_tbs--;
1189 2e70f6ef pbrook
    }
1190 2e70f6ef pbrook
}
1191 2e70f6ef pbrook
1192 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1193 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1194 5fafdf24 ths
void tb_link_phys(TranslationBlock *tb,
1195 9fa3e853 bellard
                  target_ulong phys_pc, target_ulong phys_page2)
1196 d4e8164f bellard
{
1197 9fa3e853 bellard
    unsigned int h;
1198 9fa3e853 bellard
    TranslationBlock **ptb;
1199 9fa3e853 bellard
1200 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1201 c8a706fe pbrook
       before we are done.  */
1202 c8a706fe pbrook
    mmap_lock();
1203 9fa3e853 bellard
    /* add in the physical hash table */
1204 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1205 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1206 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1207 9fa3e853 bellard
    *ptb = tb;
1208 fd6ce8f6 bellard
1209 fd6ce8f6 bellard
    /* add in the page list */
1210 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1211 9fa3e853 bellard
    if (phys_page2 != -1)
1212 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1213 9fa3e853 bellard
    else
1214 9fa3e853 bellard
        tb->page_addr[1] = -1;
1215 9fa3e853 bellard
1216 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1217 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1218 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1219 d4e8164f bellard
1220 d4e8164f bellard
    /* init original jump addresses */
1221 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1222 d4e8164f bellard
        tb_reset_jump(tb, 0);
1223 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1224 d4e8164f bellard
        tb_reset_jump(tb, 1);
1225 8a40a180 bellard
1226 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1227 8a40a180 bellard
    tb_page_check();
1228 8a40a180 bellard
#endif
1229 c8a706fe pbrook
    mmap_unlock();
1230 fd6ce8f6 bellard
}
1231 fd6ce8f6 bellard
1232 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1233 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1234 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1235 fd6ce8f6 bellard
{
1236 9fa3e853 bellard
    int m_min, m_max, m;
1237 9fa3e853 bellard
    unsigned long v;
1238 9fa3e853 bellard
    TranslationBlock *tb;
1239 a513fe19 bellard
1240 a513fe19 bellard
    if (nb_tbs <= 0)
1241 a513fe19 bellard
        return NULL;
1242 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1243 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1244 a513fe19 bellard
        return NULL;
1245 a513fe19 bellard
    /* binary search (cf Knuth) */
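    /* Note: tbs[] is ordered by tc_ptr, since translated code is appended
       sequentially to code_gen_buffer between flushes, so a binary search
       over the nb_tbs entries is valid. */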
1246 a513fe19 bellard
    m_min = 0;
1247 a513fe19 bellard
    m_max = nb_tbs - 1;
1248 a513fe19 bellard
    while (m_min <= m_max) {
1249 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1250 a513fe19 bellard
        tb = &tbs[m];
1251 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1252 a513fe19 bellard
        if (v == tc_ptr)
1253 a513fe19 bellard
            return tb;
1254 a513fe19 bellard
        else if (tc_ptr < v) {
1255 a513fe19 bellard
            m_max = m - 1;
1256 a513fe19 bellard
        } else {
1257 a513fe19 bellard
            m_min = m + 1;
1258 a513fe19 bellard
        }
1259 5fafdf24 ths
    }
1260 a513fe19 bellard
    return &tbs[m_max];
1261 a513fe19 bellard
}
1262 7501267e bellard
1263 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1264 ea041c0e bellard
1265 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1266 ea041c0e bellard
{
1267 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1268 ea041c0e bellard
    unsigned int n1;
1269 ea041c0e bellard
1270 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1271 ea041c0e bellard
    if (tb1 != NULL) {
1272 ea041c0e bellard
        /* find head of list */
1273 ea041c0e bellard
        for(;;) {
1274 ea041c0e bellard
            n1 = (long)tb1 & 3;
1275 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1276 ea041c0e bellard
            if (n1 == 2)
1277 ea041c0e bellard
                break;
1278 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1279 ea041c0e bellard
        }
1280 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1281 ea041c0e bellard
        tb_next = tb1;
1282 ea041c0e bellard
1283 ea041c0e bellard
        /* remove tb from the jmp_first list */
1284 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1285 ea041c0e bellard
        for(;;) {
1286 ea041c0e bellard
            tb1 = *ptb;
1287 ea041c0e bellard
            n1 = (long)tb1 & 3;
1288 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1289 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1290 ea041c0e bellard
                break;
1291 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1292 ea041c0e bellard
        }
1293 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1294 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1295 3b46e624 ths
1296 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1297 ea041c0e bellard
        tb_reset_jump(tb, n);
1298 ea041c0e bellard
1299 0124311e bellard
        /* suppress jumps in the tb we could have jumped to */
1300 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1301 ea041c0e bellard
    }
1302 ea041c0e bellard
}
1303 ea041c0e bellard
1304 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1305 ea041c0e bellard
{
1306 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1307 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1308 ea041c0e bellard
}
1309 ea041c0e bellard
1310 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
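/* Invalidate any translated code containing the breakpoint's address so
   that the next execution of that page is retranslated with the
   breakpoint taken into account. */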
1311 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1312 d720b93d bellard
{
1313 9b3c35e0 j_mayer
    target_phys_addr_t addr;
1314 9b3c35e0 j_mayer
    target_ulong pd;
1315 c2f07f81 pbrook
    ram_addr_t ram_addr;
1316 c2f07f81 pbrook
    PhysPageDesc *p;
1317 d720b93d bellard
1318 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1319 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1320 c2f07f81 pbrook
    if (!p) {
1321 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1322 c2f07f81 pbrook
    } else {
1323 c2f07f81 pbrook
        pd = p->phys_offset;
1324 c2f07f81 pbrook
    }
1325 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1326 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1327 d720b93d bellard
}
1328 c27004ec bellard
#endif
1329 d720b93d bellard
1330 6658ffb8 pbrook
/* Add a watchpoint.  */
1331 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1332 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1333 6658ffb8 pbrook
{
1334 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1335 c0ce998e aliguori
    CPUWatchpoint *wp;
1336 6658ffb8 pbrook
1337 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1338 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1339 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1340 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1341 b4051334 aliguori
        return -EINVAL;
1342 b4051334 aliguori
    }
1343 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1344 a1d1bb31 aliguori
1345 a1d1bb31 aliguori
    wp->vaddr = addr;
1346 b4051334 aliguori
    wp->len_mask = len_mask;
1347 a1d1bb31 aliguori
    wp->flags = flags;
1348 a1d1bb31 aliguori
1349 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1350 c0ce998e aliguori
    if (flags & BP_GDB)
1351 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1352 c0ce998e aliguori
    else
1353 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1354 6658ffb8 pbrook
1355 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1356 a1d1bb31 aliguori
1357 a1d1bb31 aliguori
    if (watchpoint)
1358 a1d1bb31 aliguori
        *watchpoint = wp;
1359 a1d1bb31 aliguori
    return 0;
1360 6658ffb8 pbrook
}
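/* Illustrative sketch (guarded out, not part of the build): one way a
   debug front-end might use the watchpoint API above.  The BP_* flag
   names are assumed to come from cpu-all.h; adjust flags to the caller's
   needs. */
#if 0
static void example_watch_guest_word(CPUState *env, target_ulong guest_addr)
{
    CPUWatchpoint *wp;

    /* the length must be a power of two and the address aligned to it,
       as enforced by cpu_watchpoint_insert() above */
    if (cpu_watchpoint_insert(env, guest_addr, 4, BP_GDB | BP_MEM_WRITE,
                              &wp) < 0) {
        return;
    }

    /* ... let the guest run; a write to guest_addr now traps ... */

    cpu_watchpoint_remove_by_ref(env, wp);
}
#endif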
1361 6658ffb8 pbrook
1362 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1363 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1364 a1d1bb31 aliguori
                          int flags)
1365 6658ffb8 pbrook
{
1366 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1367 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1368 6658ffb8 pbrook
1369 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1370 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1371 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1372 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1373 6658ffb8 pbrook
            return 0;
1374 6658ffb8 pbrook
        }
1375 6658ffb8 pbrook
    }
1376 a1d1bb31 aliguori
    return -ENOENT;
1377 6658ffb8 pbrook
}
1378 6658ffb8 pbrook
1379 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1380 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1381 a1d1bb31 aliguori
{
1382 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1383 7d03f82f edgar_igl
1384 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1385 a1d1bb31 aliguori
1386 a1d1bb31 aliguori
    qemu_free(watchpoint);
1387 a1d1bb31 aliguori
}
1388 a1d1bb31 aliguori
1389 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1390 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1391 a1d1bb31 aliguori
{
1392 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1393 a1d1bb31 aliguori
1394 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1395 a1d1bb31 aliguori
        if (wp->flags & mask)
1396 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1397 c0ce998e aliguori
    }
1398 7d03f82f edgar_igl
}
1399 7d03f82f edgar_igl
1400 a1d1bb31 aliguori
/* Add a breakpoint.  */
1401 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1402 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1403 4c3a88a2 bellard
{
1404 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1405 c0ce998e aliguori
    CPUBreakpoint *bp;
1406 3b46e624 ths
1407 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1408 4c3a88a2 bellard
1409 a1d1bb31 aliguori
    bp->pc = pc;
1410 a1d1bb31 aliguori
    bp->flags = flags;
1411 a1d1bb31 aliguori
1412 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1413 c0ce998e aliguori
    if (flags & BP_GDB)
1414 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1415 c0ce998e aliguori
    else
1416 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1417 3b46e624 ths
1418 d720b93d bellard
    breakpoint_invalidate(env, pc);
1419 a1d1bb31 aliguori
1420 a1d1bb31 aliguori
    if (breakpoint)
1421 a1d1bb31 aliguori
        *breakpoint = bp;
1422 4c3a88a2 bellard
    return 0;
1423 4c3a88a2 bellard
#else
1424 a1d1bb31 aliguori
    return -ENOSYS;
1425 4c3a88a2 bellard
#endif
1426 4c3a88a2 bellard
}
1427 4c3a88a2 bellard
1428 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1429 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1430 a1d1bb31 aliguori
{
1431 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1432 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1433 a1d1bb31 aliguori
1434 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1435 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1436 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1437 a1d1bb31 aliguori
            return 0;
1438 a1d1bb31 aliguori
        }
1439 7d03f82f edgar_igl
    }
1440 a1d1bb31 aliguori
    return -ENOENT;
1441 a1d1bb31 aliguori
#else
1442 a1d1bb31 aliguori
    return -ENOSYS;
1443 7d03f82f edgar_igl
#endif
1444 7d03f82f edgar_igl
}
1445 7d03f82f edgar_igl
1446 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1447 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1448 4c3a88a2 bellard
{
1449 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1450 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1451 d720b93d bellard
1452 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1453 a1d1bb31 aliguori
1454 a1d1bb31 aliguori
    qemu_free(breakpoint);
1455 a1d1bb31 aliguori
#endif
1456 a1d1bb31 aliguori
}
1457 a1d1bb31 aliguori
1458 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1459 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1460 a1d1bb31 aliguori
{
1461 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1462 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1463 a1d1bb31 aliguori
1464 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1465 a1d1bb31 aliguori
        if (bp->flags & mask)
1466 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1467 c0ce998e aliguori
    }
1468 4c3a88a2 bellard
#endif
1469 4c3a88a2 bellard
}
1470 4c3a88a2 bellard
1471 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1472 c33a346e bellard
   CPU loop after each instruction */
1473 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1474 c33a346e bellard
{
1475 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1476 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1477 c33a346e bellard
        env->singlestep_enabled = enabled;
1478 e22a25c9 aliguori
        if (kvm_enabled())
1479 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1480 e22a25c9 aliguori
        else {
1481 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1482 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1483 e22a25c9 aliguori
            tb_flush(env);
1484 e22a25c9 aliguori
        }
1485 c33a346e bellard
    }
1486 c33a346e bellard
#endif
1487 c33a346e bellard
}
1488 c33a346e bellard
1489 34865134 bellard
/* enable or disable low-level logging */
1490 34865134 bellard
void cpu_set_log(int log_flags)
1491 34865134 bellard
{
1492 34865134 bellard
    loglevel = log_flags;
1493 34865134 bellard
    if (loglevel && !logfile) {
1494 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1495 34865134 bellard
        if (!logfile) {
1496 34865134 bellard
            perror(logfilename);
1497 34865134 bellard
            _exit(1);
1498 34865134 bellard
        }
1499 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1500 9fa3e853 bellard
        /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1501 9fa3e853 bellard
        {
1502 b55266b5 blueswir1
            static char logfile_buf[4096];
1503 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1504 9fa3e853 bellard
        }
1505 bf65f53f Filip Navara
#elif !defined(_WIN32)
1506 bf65f53f Filip Navara
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1507 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1508 9fa3e853 bellard
#endif
1509 e735b91c pbrook
        log_append = 1;
1510 e735b91c pbrook
    }
1511 e735b91c pbrook
    if (!loglevel && logfile) {
1512 e735b91c pbrook
        fclose(logfile);
1513 e735b91c pbrook
        logfile = NULL;
1514 34865134 bellard
    }
1515 34865134 bellard
}
1516 34865134 bellard
1517 34865134 bellard
void cpu_set_log_filename(const char *filename)
1518 34865134 bellard
{
1519 34865134 bellard
    logfilename = strdup(filename);
1520 e735b91c pbrook
    if (logfile) {
1521 e735b91c pbrook
        fclose(logfile);
1522 e735b91c pbrook
        logfile = NULL;
1523 e735b91c pbrook
    }
1524 e735b91c pbrook
    cpu_set_log(loglevel);
1525 34865134 bellard
}
1526 c33a346e bellard
1527 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1528 ea041c0e bellard
{
1529 2f7bb878 Juan Quintela
#if defined(CONFIG_USE_NPTL)
1530 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1531 3098dba0 aurel32
       problem and hope the CPU will stop of its own accord.  For userspace
1532 3098dba0 aurel32
       emulation this isn't actually as bad as it sounds, since signals
1533 3098dba0 aurel32
       are mostly used to interrupt blocking syscalls.  */
1534 3098dba0 aurel32
#else
1535 ea041c0e bellard
    TranslationBlock *tb;
1536 15a51156 aurel32
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1537 59817ccb bellard
1538 3098dba0 aurel32
    tb = env->current_tb;
1539 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1540 3098dba0 aurel32
       all the potentially executing TBs */
1541 3098dba0 aurel32
    if (tb && !testandset(&interrupt_lock)) {
1542 3098dba0 aurel32
        env->current_tb = NULL;
1543 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1544 3098dba0 aurel32
        resetlock(&interrupt_lock);
1545 be214e6c aurel32
    }
1546 3098dba0 aurel32
#endif
1547 3098dba0 aurel32
}
1548 3098dba0 aurel32
1549 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1550 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1551 3098dba0 aurel32
{
1552 3098dba0 aurel32
    int old_mask;
1553 be214e6c aurel32
1554 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1555 68a79315 bellard
    env->interrupt_request |= mask;
1556 3098dba0 aurel32
1557 8edac960 aliguori
#ifndef CONFIG_USER_ONLY
1558 8edac960 aliguori
    /*
1559 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1560 8edac960 aliguori
     * case it's halted.
1561 8edac960 aliguori
     */
1562 8edac960 aliguori
    if (!qemu_cpu_self(env)) {
1563 8edac960 aliguori
        qemu_cpu_kick(env);
1564 8edac960 aliguori
        return;
1565 8edac960 aliguori
    }
1566 8edac960 aliguori
#endif
1567 8edac960 aliguori
1568 2e70f6ef pbrook
    if (use_icount) {
1569 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1570 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1571 2e70f6ef pbrook
        if (!can_do_io(env)
1572 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1573 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1574 2e70f6ef pbrook
        }
1575 2e70f6ef pbrook
#endif
1576 2e70f6ef pbrook
    } else {
1577 3098dba0 aurel32
        cpu_unlink_tb(env);
1578 ea041c0e bellard
    }
1579 ea041c0e bellard
}
1580 ea041c0e bellard
1581 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1582 b54ad049 bellard
{
1583 b54ad049 bellard
    env->interrupt_request &= ~mask;
1584 b54ad049 bellard
}
1585 b54ad049 bellard
1586 3098dba0 aurel32
void cpu_exit(CPUState *env)
1587 3098dba0 aurel32
{
1588 3098dba0 aurel32
    env->exit_request = 1;
1589 3098dba0 aurel32
    cpu_unlink_tb(env);
1590 3098dba0 aurel32
}
1591 3098dba0 aurel32
1592 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1593 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1594 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1595 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1596 f193c797 bellard
      "show target assembly code for each compiled TB" },
1597 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1598 57fec1fe bellard
      "show micro ops for each compiled TB" },
1599 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1600 e01a1157 blueswir1
      "show micro ops "
1601 e01a1157 blueswir1
#ifdef TARGET_I386
1602 e01a1157 blueswir1
      "before eflags optimization and "
1603 f193c797 bellard
#endif
1604 e01a1157 blueswir1
      "after liveness analysis" },
1605 f193c797 bellard
    { CPU_LOG_INT, "int",
1606 f193c797 bellard
      "show interrupts/exceptions in short format" },
1607 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1608 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1609 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1610 e91c8a77 ths
      "show CPU state before block translation" },
1611 f193c797 bellard
#ifdef TARGET_I386
1612 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1613 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1614 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1615 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1616 f193c797 bellard
#endif
1617 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1618 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1619 fd872598 bellard
      "show all i/o ports accesses" },
1620 8e3a9fd2 bellard
#endif
1621 f193c797 bellard
    { 0, NULL, NULL },
1622 f193c797 bellard
};
1623 f193c797 bellard
1624 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1625 f193c797 bellard
{
1626 f193c797 bellard
    if (strlen(s2) != n)
1627 f193c797 bellard
        return 0;
1628 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1629 f193c797 bellard
}
1630 3b46e624 ths
1631 f193c797 bellard
/* takes a comma-separated list of log masks. Returns 0 on error. */
1632 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1633 f193c797 bellard
{
1634 c7cd6a37 blueswir1
    const CPULogItem *item;
1635 f193c797 bellard
    int mask;
1636 f193c797 bellard
    const char *p, *p1;
1637 f193c797 bellard
1638 f193c797 bellard
    p = str;
1639 f193c797 bellard
    mask = 0;
1640 f193c797 bellard
    for(;;) {
1641 f193c797 bellard
        p1 = strchr(p, ',');
1642 f193c797 bellard
        if (!p1)
1643 f193c797 bellard
            p1 = p + strlen(p);
1644 8e3a9fd2 bellard
        if (cmp1(p, p1 - p, "all")) {
1645 8e3a9fd2 bellard
            for(item = cpu_log_items; item->mask != 0; item++) {
1646 8e3a9fd2 bellard
                mask |= item->mask;
1647 8e3a9fd2 bellard
            }
1648 8e3a9fd2 bellard
        } else {
1649 f193c797 bellard
            for(item = cpu_log_items; item->mask != 0; item++) {
1650 f193c797 bellard
                if (cmp1(p, p1 - p, item->name))
1651 f193c797 bellard
                    goto found;
1652 f193c797 bellard
            }
1653 f193c797 bellard
            return 0;
1654 8e3a9fd2 bellard
        }
1655 f193c797 bellard
    found:
1656 f193c797 bellard
        mask |= item->mask;
1657 f193c797 bellard
        if (*p1 != ',')
1658 f193c797 bellard
            break;
1659 f193c797 bellard
        p = p1 + 1;
1660 f193c797 bellard
    }
1661 f193c797 bellard
    return mask;
1662 f193c797 bellard
}
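/* Example (illustrative): cpu_str_to_log_mask("in_asm,cpu") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, suitable for passing to
   cpu_set_log(); any unrecognized name in the list makes the whole call
   return 0. */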
1663 ea041c0e bellard
1664 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1665 7501267e bellard
{
1666 7501267e bellard
    va_list ap;
1667 493ae1f0 pbrook
    va_list ap2;
1668 7501267e bellard
1669 7501267e bellard
    va_start(ap, fmt);
1670 493ae1f0 pbrook
    va_copy(ap2, ap);
1671 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1672 7501267e bellard
    vfprintf(stderr, fmt, ap);
1673 7501267e bellard
    fprintf(stderr, "\n");
1674 7501267e bellard
#ifdef TARGET_I386
1675 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1676 7fe48483 bellard
#else
1677 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1678 7501267e bellard
#endif
1679 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1680 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1681 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1682 93fcfe39 aliguori
        qemu_log("\n");
1683 f9373291 j_mayer
#ifdef TARGET_I386
1684 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1685 f9373291 j_mayer
#else
1686 93fcfe39 aliguori
        log_cpu_state(env, 0);
1687 f9373291 j_mayer
#endif
1688 31b1a7b4 aliguori
        qemu_log_flush();
1689 93fcfe39 aliguori
        qemu_log_close();
1690 924edcae balrog
    }
1691 493ae1f0 pbrook
    va_end(ap2);
1692 f9373291 j_mayer
    va_end(ap);
1693 7501267e bellard
    abort();
1694 7501267e bellard
}
1695 7501267e bellard
1696 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1697 c5be9f08 ths
{
1698 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1699 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1700 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1701 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1702 5a38f081 aliguori
    CPUBreakpoint *bp;
1703 5a38f081 aliguori
    CPUWatchpoint *wp;
1704 5a38f081 aliguori
#endif
1705 5a38f081 aliguori
1706 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1707 5a38f081 aliguori
1708 5a38f081 aliguori
    /* Preserve chaining and index. */
1709 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1710 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1711 5a38f081 aliguori
1712 5a38f081 aliguori
    /* Clone all break/watchpoints.
1713 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1714 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1715 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->breakpoints);
1716 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->watchpoints);
1717 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1718 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1719 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1720 5a38f081 aliguori
    }
1721 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1722 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1723 5a38f081 aliguori
                              wp->flags, NULL);
1724 5a38f081 aliguori
    }
1725 5a38f081 aliguori
#endif
1726 5a38f081 aliguori
1727 c5be9f08 ths
    return new_env;
1728 c5be9f08 ths
}
1729 c5be9f08 ths
1730 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1731 0124311e bellard
1732 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1733 5c751e99 edgar_igl
{
1734 5c751e99 edgar_igl
    unsigned int i;
1735 5c751e99 edgar_igl
1736 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1737 5c751e99 edgar_igl
       overlap the flushed page.  */
1738 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1739 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1740 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1741 5c751e99 edgar_igl
1742 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1743 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1744 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1745 5c751e99 edgar_igl
}
1746 5c751e99 edgar_igl
1747 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1748 08738984 Igor Kovalenko
    .addr_read  = -1,
1749 08738984 Igor Kovalenko
    .addr_write = -1,
1750 08738984 Igor Kovalenko
    .addr_code  = -1,
1751 08738984 Igor Kovalenko
    .addend     = -1,
1752 08738984 Igor Kovalenko
};
1753 08738984 Igor Kovalenko
1754 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1755 ee8b7021 bellard
   implemented yet) */
1756 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1757 33417e70 bellard
{
1758 33417e70 bellard
    int i;
1759 0124311e bellard
1760 9fa3e853 bellard
#if defined(DEBUG_TLB)
1761 9fa3e853 bellard
    printf("tlb_flush:\n");
1762 9fa3e853 bellard
#endif
1763 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1764 0124311e bellard
       links while we are modifying them */
1765 0124311e bellard
    env->current_tb = NULL;
1766 0124311e bellard
1767 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1768 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1769 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1770 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1771 cfde4bd9 Isaku Yamahata
        }
1772 33417e70 bellard
    }
1773 9fa3e853 bellard
1774 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1775 9fa3e853 bellard
1776 e3db7226 bellard
    tlb_flush_count++;
1777 33417e70 bellard
}
1778 33417e70 bellard
1779 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1780 61382a50 bellard
{
1781 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1782 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1783 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1784 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1785 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1786 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1787 08738984 Igor Kovalenko
        *tlb_entry = s_cputlb_empty_entry;
1788 84b7b8e7 bellard
    }
1789 61382a50 bellard
}
1790 61382a50 bellard
1791 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
1792 33417e70 bellard
{
1793 8a40a180 bellard
    int i;
1794 cfde4bd9 Isaku Yamahata
    int mmu_idx;
1795 0124311e bellard
1796 9fa3e853 bellard
#if defined(DEBUG_TLB)
1797 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1798 9fa3e853 bellard
#endif
1799 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1800 0124311e bellard
       links while we are modifying them */
1801 0124311e bellard
    env->current_tb = NULL;
1802 61382a50 bellard
1803 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
1804 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1805 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1806 cfde4bd9 Isaku Yamahata
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1807 0124311e bellard
1808 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
1809 9fa3e853 bellard
}
1810 9fa3e853 bellard
1811 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1812 9fa3e853 bellard
   can be detected */
1813 6a00d601 bellard
static void tlb_protect_code(ram_addr_t ram_addr)
1814 9fa3e853 bellard
{
1815 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1816 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1817 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1818 9fa3e853 bellard
}
1819 9fa3e853 bellard
1820 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1821 3a7d929e bellard
   tested for self modifying code */
1822 5fafdf24 ths
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1823 3a7d929e bellard
                                    target_ulong vaddr)
1824 9fa3e853 bellard
{
1825 3a7d929e bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1826 1ccde1cb bellard
}
1827 1ccde1cb bellard
1828 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1829 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
1830 1ccde1cb bellard
{
1831 1ccde1cb bellard
    unsigned long addr;
1832 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1833 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1834 1ccde1cb bellard
        if ((addr - start) < length) {
1835 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1836 1ccde1cb bellard
        }
1837 1ccde1cb bellard
    }
1838 1ccde1cb bellard
}
1839 1ccde1cb bellard
1840 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
1841 3a7d929e bellard
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1842 0a962c02 bellard
                                     int dirty_flags)
1843 1ccde1cb bellard
{
1844 1ccde1cb bellard
    CPUState *env;
1845 4f2ac237 bellard
    unsigned long length, start1;
1846 0a962c02 bellard
    int i, mask, len;
1847 0a962c02 bellard
    uint8_t *p;
1848 1ccde1cb bellard
1849 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
1850 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
1851 1ccde1cb bellard
1852 1ccde1cb bellard
    length = end - start;
1853 1ccde1cb bellard
    if (length == 0)
1854 1ccde1cb bellard
        return;
1855 0a962c02 bellard
    len = length >> TARGET_PAGE_BITS;
1856 f23db169 bellard
    mask = ~dirty_flags;
1857 f23db169 bellard
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1858 f23db169 bellard
    for(i = 0; i < len; i++)
1859 f23db169 bellard
        p[i] &= mask;
1860 f23db169 bellard
1861 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
1862 1ccde1cb bellard
       when accessing the range */
1863 5579c7f3 pbrook
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1864 5579c7f3 pbrook
    /* Check that we don't span multiple blocks - this breaks the
1865 5579c7f3 pbrook
       address comparisons below.  */
1866 5579c7f3 pbrook
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1867 5579c7f3 pbrook
            != (end - 1) - start) {
1868 5579c7f3 pbrook
        abort();
1869 5579c7f3 pbrook
    }
1870 5579c7f3 pbrook
1871 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1872 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1873 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1874 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
1875 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1876 cfde4bd9 Isaku Yamahata
                                      start1, length);
1877 cfde4bd9 Isaku Yamahata
        }
1878 6a00d601 bellard
    }
1879 1ccde1cb bellard
}
1880 1ccde1cb bellard
1881 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
1882 74576198 aliguori
{
1883 74576198 aliguori
    in_migration = enable;
1884 b0a46a33 Jan Kiszka
    if (kvm_enabled()) {
1885 b0a46a33 Jan Kiszka
        return kvm_set_migration_log(enable);
1886 b0a46a33 Jan Kiszka
    }
1887 74576198 aliguori
    return 0;
1888 74576198 aliguori
}
1889 74576198 aliguori
1890 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
1891 74576198 aliguori
{
1892 74576198 aliguori
    return in_migration;
1893 74576198 aliguori
}
1894 74576198 aliguori
1895 151f7749 Jan Kiszka
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1896 151f7749 Jan Kiszka
                                   target_phys_addr_t end_addr)
1897 2bec46dc aliguori
{
1898 151f7749 Jan Kiszka
    int ret = 0;
1899 151f7749 Jan Kiszka
1900 2bec46dc aliguori
    if (kvm_enabled())
1901 151f7749 Jan Kiszka
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1902 151f7749 Jan Kiszka
    return ret;
1903 2bec46dc aliguori
}
1904 2bec46dc aliguori
1905 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1906 3a7d929e bellard
{
1907 3a7d929e bellard
    ram_addr_t ram_addr;
1908 5579c7f3 pbrook
    void *p;
1909 3a7d929e bellard
1910 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1911 5579c7f3 pbrook
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1912 5579c7f3 pbrook
            + tlb_entry->addend);
1913 5579c7f3 pbrook
        ram_addr = qemu_ram_addr_from_host(p);
1914 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1915 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1916 3a7d929e bellard
        }
1917 3a7d929e bellard
    }
1918 3a7d929e bellard
}
1919 3a7d929e bellard
1920 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
1921 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
1922 3a7d929e bellard
{
1923 3a7d929e bellard
    int i;
1924 cfde4bd9 Isaku Yamahata
    int mmu_idx;
1925 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1926 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
1927 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1928 cfde4bd9 Isaku Yamahata
    }
1929 3a7d929e bellard
}
1930 3a7d929e bellard
1931 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1932 1ccde1cb bellard
{
1933 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1934 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
1935 1ccde1cb bellard
}
1936 1ccde1cb bellard
1937 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
1938 0f459d16 pbrook
   so that it is no longer dirty */
1939 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1940 1ccde1cb bellard
{
1941 1ccde1cb bellard
    int i;
1942 cfde4bd9 Isaku Yamahata
    int mmu_idx;
1943 1ccde1cb bellard
1944 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
1945 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1946 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1947 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1948 9fa3e853 bellard
}
1949 9fa3e853 bellard
1950 59817ccb bellard
/* add a new TLB entry. At most one entry for a given virtual address
1951 59817ccb bellard
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1952 59817ccb bellard
   (can only happen in non SOFTMMU mode for I/O pages or pages
1953 59817ccb bellard
   conflicting with the host address space). */
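/* Note: the low bits of the addr_read/addr_write/addr_code fields double
   as flags: TLB_MMIO forces accesses through the I/O slow path,
   TLB_NOTDIRTY makes the first write to a clean RAM page take the slow
   path so the dirty bitmap can be updated, and TLB_INVALID_MASK marks an
   entry that can never match. */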
1954 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1955 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
1956 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
1957 9fa3e853 bellard
{
1958 92e873b9 bellard
    PhysPageDesc *p;
1959 4f2ac237 bellard
    unsigned long pd;
1960 9fa3e853 bellard
    unsigned int index;
1961 4f2ac237 bellard
    target_ulong address;
1962 0f459d16 pbrook
    target_ulong code_address;
1963 108c49b8 bellard
    target_phys_addr_t addend;
1964 9fa3e853 bellard
    int ret;
1965 84b7b8e7 bellard
    CPUTLBEntry *te;
1966 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1967 0f459d16 pbrook
    target_phys_addr_t iotlb;
1968 9fa3e853 bellard
1969 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1970 9fa3e853 bellard
    if (!p) {
1971 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
1972 9fa3e853 bellard
    } else {
1973 9fa3e853 bellard
        pd = p->phys_offset;
1974 9fa3e853 bellard
    }
1975 9fa3e853 bellard
#if defined(DEBUG_TLB)
1976 6ebbf390 j_mayer
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1977 6ebbf390 j_mayer
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1978 9fa3e853 bellard
#endif
1979 9fa3e853 bellard
1980 9fa3e853 bellard
    ret = 0;
1981 0f459d16 pbrook
    address = vaddr;
1982 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1983 0f459d16 pbrook
        /* IO memory case (romd handled later) */
1984 0f459d16 pbrook
        address |= TLB_MMIO;
1985 0f459d16 pbrook
    }
1986 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1987 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1988 0f459d16 pbrook
        /* Normal RAM.  */
1989 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
1990 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1991 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
1992 0f459d16 pbrook
        else
1993 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
1994 0f459d16 pbrook
    } else {
1995 ccbb4d44 Stuart Brady
        /* IO handlers are currently passed a physical address.
1996 0f459d16 pbrook
           It would be nice to pass an offset from the base address
1997 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
1998 0f459d16 pbrook
           and avoid full address decoding in every device.
1999 0f459d16 pbrook
           We can't use the high bits of pd for this because
2000 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2001 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2002 8da3ff18 pbrook
        if (p) {
2003 8da3ff18 pbrook
            iotlb += p->region_offset;
2004 8da3ff18 pbrook
        } else {
2005 8da3ff18 pbrook
            iotlb += paddr;
2006 8da3ff18 pbrook
        }
2007 0f459d16 pbrook
    }
2008 0f459d16 pbrook
2009 0f459d16 pbrook
    code_address = address;
2010 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2011 0f459d16 pbrook
       watchpoint trap routines.  */
2012 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2013 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2014 0f459d16 pbrook
            iotlb = io_mem_watch + paddr;
2015 0f459d16 pbrook
            /* TODO: The memory case can be optimized by not trapping
2016 0f459d16 pbrook
               reads of pages with a write breakpoint.  */
2017 0f459d16 pbrook
            address |= TLB_MMIO;
2018 6658ffb8 pbrook
        }
2019 0f459d16 pbrook
    }
2020 d79acba4 balrog
2021 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2022 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2023 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2024 0f459d16 pbrook
    te->addend = addend - vaddr;
2025 0f459d16 pbrook
    if (prot & PAGE_READ) {
2026 0f459d16 pbrook
        te->addr_read = address;
2027 0f459d16 pbrook
    } else {
2028 0f459d16 pbrook
        te->addr_read = -1;
2029 0f459d16 pbrook
    }
2030 5c751e99 edgar_igl
2031 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2032 0f459d16 pbrook
        te->addr_code = code_address;
2033 0f459d16 pbrook
    } else {
2034 0f459d16 pbrook
        te->addr_code = -1;
2035 0f459d16 pbrook
    }
2036 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2037 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2038 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2039 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2040 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2041 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2042 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2043 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2044 9fa3e853 bellard
        } else {
2045 0f459d16 pbrook
            te->addr_write = address;
2046 9fa3e853 bellard
        }
2047 0f459d16 pbrook
    } else {
2048 0f459d16 pbrook
        te->addr_write = -1;
2049 9fa3e853 bellard
    }
2050 9fa3e853 bellard
    return ret;
2051 9fa3e853 bellard
}
2052 9fa3e853 bellard
2053 0124311e bellard
#else
2054 0124311e bellard
2055 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2056 0124311e bellard
{
2057 0124311e bellard
}
2058 0124311e bellard
2059 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2060 0124311e bellard
{
2061 0124311e bellard
}
2062 0124311e bellard
2063 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2064 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
2065 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2066 9fa3e853 bellard
{
2067 9fa3e853 bellard
    return 0;
2068 9fa3e853 bellard
}
2069 0124311e bellard
2070 edf8e2af Mika Westerberg
/*
2071 edf8e2af Mika Westerberg
 * Walks guest process memory "regions" one by one
2072 edf8e2af Mika Westerberg
 * and calls callback function 'fn' for each region.
2073 edf8e2af Mika Westerberg
 */
2074 edf8e2af Mika Westerberg
int walk_memory_regions(void *priv,
2075 edf8e2af Mika Westerberg
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2076 33417e70 bellard
{
2077 9fa3e853 bellard
    unsigned long start, end;
2078 edf8e2af Mika Westerberg
    PageDesc *p = NULL;
2079 9fa3e853 bellard
    int i, j, prot, prot1;
2080 edf8e2af Mika Westerberg
    int rc = 0;
2081 33417e70 bellard
2082 edf8e2af Mika Westerberg
    start = end = -1;
2083 9fa3e853 bellard
    prot = 0;
2084 edf8e2af Mika Westerberg
2085 edf8e2af Mika Westerberg
    for (i = 0; i <= L1_SIZE; i++) {
2086 edf8e2af Mika Westerberg
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2087 edf8e2af Mika Westerberg
        for (j = 0; j < L2_SIZE; j++) {
2088 edf8e2af Mika Westerberg
            prot1 = (p == NULL) ? 0 : p[j].flags;
2089 edf8e2af Mika Westerberg
            /*
2090 edf8e2af Mika Westerberg
             * "region" is one continuous chunk of memory
2091 edf8e2af Mika Westerberg
             * that has same protection flags set.
2092 edf8e2af Mika Westerberg
             */
2093 9fa3e853 bellard
            if (prot1 != prot) {
2094 9fa3e853 bellard
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2095 9fa3e853 bellard
                if (start != -1) {
2096 edf8e2af Mika Westerberg
                    rc = (*fn)(priv, start, end, prot);
2097 edf8e2af Mika Westerberg
                    /* callback can stop iteration by returning != 0 */
2098 edf8e2af Mika Westerberg
                    if (rc != 0)
2099 edf8e2af Mika Westerberg
                        return (rc);
2100 9fa3e853 bellard
                }
2101 9fa3e853 bellard
                if (prot1 != 0)
2102 9fa3e853 bellard
                    start = end;
2103 9fa3e853 bellard
                else
2104 9fa3e853 bellard
                    start = -1;
2105 9fa3e853 bellard
                prot = prot1;
2106 9fa3e853 bellard
            }
2107 edf8e2af Mika Westerberg
            if (p == NULL)
2108 9fa3e853 bellard
                break;
2109 9fa3e853 bellard
        }
2110 33417e70 bellard
    }
2111 edf8e2af Mika Westerberg
    return (rc);
2112 edf8e2af Mika Westerberg
}
2113 edf8e2af Mika Westerberg
2114 edf8e2af Mika Westerberg
static int dump_region(void *priv, unsigned long start,
2115 edf8e2af Mika Westerberg
    unsigned long end, unsigned long prot)
2116 edf8e2af Mika Westerberg
{
2117 edf8e2af Mika Westerberg
    FILE *f = (FILE *)priv;
2118 edf8e2af Mika Westerberg
2119 edf8e2af Mika Westerberg
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2120 edf8e2af Mika Westerberg
        start, end, end - start,
2121 edf8e2af Mika Westerberg
        ((prot & PAGE_READ) ? 'r' : '-'),
2122 edf8e2af Mika Westerberg
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2123 edf8e2af Mika Westerberg
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2124 edf8e2af Mika Westerberg
2125 edf8e2af Mika Westerberg
    return (0);
2126 edf8e2af Mika Westerberg
}
2127 edf8e2af Mika Westerberg
2128 edf8e2af Mika Westerberg
/* dump memory mappings */
2129 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2130 edf8e2af Mika Westerberg
{
2131 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2132 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2133 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2134 33417e70 bellard
}
2135 33417e70 bellard
2136 53a5960a pbrook
int page_get_flags(target_ulong address)
2137 33417e70 bellard
{
2138 9fa3e853 bellard
    PageDesc *p;
2139 9fa3e853 bellard
2140 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2141 33417e70 bellard
    if (!p)
2142 9fa3e853 bellard
        return 0;
2143 9fa3e853 bellard
    return p->flags;
2144 9fa3e853 bellard
}
2145 9fa3e853 bellard
2146 9fa3e853 bellard
/* modify the flags of a page and invalidate the code if
2147 ccbb4d44 Stuart Brady
   necessary. The flag PAGE_WRITE_ORG is set automatically
2148 9fa3e853 bellard
   depending on PAGE_WRITE */
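/* (Typically called from the user-mode mmap()/mprotect() emulation;
   as noted below, mmap_lock must already be held.) */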
2149 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2150 9fa3e853 bellard
{
2151 9fa3e853 bellard
    PageDesc *p;
2152 53a5960a pbrook
    target_ulong addr;
2153 9fa3e853 bellard
2154 c8a706fe pbrook
    /* mmap_lock should already be held.  */
2155 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2156 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2157 9fa3e853 bellard
    if (flags & PAGE_WRITE)
2158 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2159 9fa3e853 bellard
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2160 9fa3e853 bellard
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2161 17e2377a pbrook
        /* We may be called for host regions that are outside guest
2162 17e2377a pbrook
           address space.  */
2163 17e2377a pbrook
        if (!p)
2164 17e2377a pbrook
            return;
2165 9fa3e853 bellard
        /* if the write protection is set, then we invalidate the code
2166 9fa3e853 bellard
           inside */
2167 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2168 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2169 9fa3e853 bellard
            p->first_tb) {
2170 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2171 9fa3e853 bellard
        }
2172 9fa3e853 bellard
        p->flags = flags;
2173 9fa3e853 bellard
    }
2174 33417e70 bellard
}
2175 33417e70 bellard
2176 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2177 3d97b40b ths
{
2178 3d97b40b ths
    PageDesc *p;
2179 3d97b40b ths
    target_ulong end;
2180 3d97b40b ths
    target_ulong addr;
2181 3d97b40b ths
2182 55f280c9 balrog
    if (start + len < start)
2183 55f280c9 balrog
        /* we've wrapped around */
2184 55f280c9 balrog
        return -1;
2185 55f280c9 balrog
2186 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2187 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2188 3d97b40b ths
2189 3d97b40b ths
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2190 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2191 3d97b40b ths
        if( !p )
2192 3d97b40b ths
            return -1;
2193 3d97b40b ths
        if( !(p->flags & PAGE_VALID) )
2194 3d97b40b ths
            return -1;
2195 3d97b40b ths
2196 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2197 3d97b40b ths
            return -1;
2198 dae3270c bellard
        if (flags & PAGE_WRITE) {
2199 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2200 dae3270c bellard
                return -1;
2201 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2202 dae3270c bellard
               contains translated code */
2203 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2204 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2205 dae3270c bellard
                    return -1;
2206 dae3270c bellard
            }
2207 dae3270c bellard
            return 0;
2208 dae3270c bellard
        }
2209 3d97b40b ths
    }
2210 3d97b40b ths
    return 0;
2211 3d97b40b ths
}
2212 3d97b40b ths
2213 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2214 ccbb4d44 Stuart Brady
   page. Return TRUE if the fault was successfully handled. */
2215 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2216 9fa3e853 bellard
{
2217 9fa3e853 bellard
    unsigned int page_index, prot, pindex;
2218 9fa3e853 bellard
    PageDesc *p, *p1;
2219 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2220 9fa3e853 bellard
2221 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2222 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2223 c8a706fe pbrook
       practice it seems to be ok.  */
2224 c8a706fe pbrook
    mmap_lock();
2225 c8a706fe pbrook
2226 83fb7adf bellard
    host_start = address & qemu_host_page_mask;
2227 9fa3e853 bellard
    page_index = host_start >> TARGET_PAGE_BITS;
2228 9fa3e853 bellard
    p1 = page_find(page_index);
2229 c8a706fe pbrook
    if (!p1) {
2230 c8a706fe pbrook
        mmap_unlock();
2231 9fa3e853 bellard
        return 0;
2232 c8a706fe pbrook
    }
2233 83fb7adf bellard
    host_end = host_start + qemu_host_page_size;
2234 9fa3e853 bellard
    p = p1;
2235 9fa3e853 bellard
    prot = 0;
2236 9fa3e853 bellard
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2237 9fa3e853 bellard
        prot |= p->flags;
2238 9fa3e853 bellard
        p++;
2239 9fa3e853 bellard
    }
2240 9fa3e853 bellard
    /* if the page was really writable, then we change its
2241 9fa3e853 bellard
       protection back to writable */
2242 9fa3e853 bellard
    if (prot & PAGE_WRITE_ORG) {
2243 9fa3e853 bellard
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2244 9fa3e853 bellard
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2245 5fafdf24 ths
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2246 9fa3e853 bellard
                     (prot & PAGE_BITS) | PAGE_WRITE);
2247 9fa3e853 bellard
            p1[pindex].flags |= PAGE_WRITE;
2248 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2249 9fa3e853 bellard
               the corresponding translated code. */
2250 d720b93d bellard
            tb_invalidate_phys_page(address, pc, puc);
2251 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2252 9fa3e853 bellard
            tb_invalidate_check(address);
2253 9fa3e853 bellard
#endif
2254 c8a706fe pbrook
            mmap_unlock();
2255 9fa3e853 bellard
            return 1;
2256 9fa3e853 bellard
        }
2257 9fa3e853 bellard
    }
2258 c8a706fe pbrook
    mmap_unlock();
2259 9fa3e853 bellard
    return 0;
2260 9fa3e853 bellard
}
2261 9fa3e853 bellard
2262 6a00d601 bellard
static inline void tlb_set_dirty(CPUState *env,
2263 6a00d601 bellard
                                 unsigned long addr, target_ulong vaddr)
2264 1ccde1cb bellard
{
2265 1ccde1cb bellard
}
2266 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2267 9fa3e853 bellard
2268 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2269 8da3ff18 pbrook
2270 db7b5426 blueswir1
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2271 8da3ff18 pbrook
                             ram_addr_t memory, ram_addr_t region_offset);
2272 00f82b8a aurel32
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2273 8da3ff18 pbrook
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2274 db7b5426 blueswir1
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2275 db7b5426 blueswir1
                      need_subpage)                                     \
2276 db7b5426 blueswir1
    do {                                                                \
2277 db7b5426 blueswir1
        if (addr > start_addr)                                          \
2278 db7b5426 blueswir1
            start_addr2 = 0;                                            \
2279 db7b5426 blueswir1
        else {                                                          \
2280 db7b5426 blueswir1
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2281 db7b5426 blueswir1
            if (start_addr2 > 0)                                        \
2282 db7b5426 blueswir1
                need_subpage = 1;                                       \
2283 db7b5426 blueswir1
        }                                                               \
2284 db7b5426 blueswir1
                                                                        \
2285 49e9fba2 blueswir1
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2286 db7b5426 blueswir1
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2287 db7b5426 blueswir1
        else {                                                          \
2288 db7b5426 blueswir1
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2289 db7b5426 blueswir1
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2290 db7b5426 blueswir1
                need_subpage = 1;                                       \
2291 db7b5426 blueswir1
        }                                                               \
2292 db7b5426 blueswir1
    } while (0)
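/* Worked example (illustrative, assuming 4 KB target pages): registering a
   0x100-byte region at start_addr 0x10000080 reaches CHECK_SUBPAGE with
   addr == start_addr on the first iteration, so start_addr2 = 0x080 and
   end_addr2 = 0x17f, and need_subpage is set because the region covers
   only part of the page and must go through the subpage machinery below. */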
2293 db7b5426 blueswir1
2294 33417e70 bellard
/* register physical memory. 'size' must be a multiple of the target
2295 33417e70 bellard
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2296 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2297 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2298 ccbb4d44 Stuart Brady
   start_addr and region_offset are rounded down to a page boundary
2299 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2300 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
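/* Illustrative sketch (hypothetical names, not part of the original file):
   a device that maps one page out of a larger MMIO window can pass that
   page's offset within the window as region_offset, so its callbacks always
   see offsets relative to the start of the window.  'bar_base' and
   's->mmio_index' (an index obtained from cpu_register_io_memory()) are
   assumptions:

       cpu_register_physical_memory_offset(bar_base + 2 * TARGET_PAGE_SIZE,
                                           TARGET_PAGE_SIZE, s->mmio_index,
                                           2 * TARGET_PAGE_SIZE);
*/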
2301 8da3ff18 pbrook
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2302 8da3ff18 pbrook
                                         ram_addr_t size,
2303 8da3ff18 pbrook
                                         ram_addr_t phys_offset,
2304 8da3ff18 pbrook
                                         ram_addr_t region_offset)
2305 33417e70 bellard
{
2306 108c49b8 bellard
    target_phys_addr_t addr, end_addr;
2307 92e873b9 bellard
    PhysPageDesc *p;
2308 9d42037b bellard
    CPUState *env;
2309 00f82b8a aurel32
    ram_addr_t orig_size = size;
2310 db7b5426 blueswir1
    void *subpage;
2311 33417e70 bellard
2312 7ba1e619 aliguori
    if (kvm_enabled())
2313 7ba1e619 aliguori
        kvm_set_phys_mem(start_addr, size, phys_offset);
2314 7ba1e619 aliguori
2315 67c4d23c pbrook
    if (phys_offset == IO_MEM_UNASSIGNED) {
2316 67c4d23c pbrook
        region_offset = start_addr;
2317 67c4d23c pbrook
    }
2318 8da3ff18 pbrook
    region_offset &= TARGET_PAGE_MASK;
2319 5fd386f6 bellard
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2320 49e9fba2 blueswir1
    end_addr = start_addr + (target_phys_addr_t)size;
2321 49e9fba2 blueswir1
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2322 db7b5426 blueswir1
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2323 db7b5426 blueswir1
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2324 00f82b8a aurel32
            ram_addr_t orig_memory = p->phys_offset;
2325 db7b5426 blueswir1
            target_phys_addr_t start_addr2, end_addr2;
2326 db7b5426 blueswir1
            int need_subpage = 0;
2327 db7b5426 blueswir1
2328 db7b5426 blueswir1
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2329 db7b5426 blueswir1
                          need_subpage);
2330 4254fab8 blueswir1
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2331 db7b5426 blueswir1
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2332 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2333 8da3ff18 pbrook
                                           &p->phys_offset, orig_memory,
2334 8da3ff18 pbrook
                                           p->region_offset);
2335 db7b5426 blueswir1
                } else {
2336 db7b5426 blueswir1
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2337 db7b5426 blueswir1
                                            >> IO_MEM_SHIFT];
2338 db7b5426 blueswir1
                }
2339 8da3ff18 pbrook
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2340 8da3ff18 pbrook
                                 region_offset);
2341 8da3ff18 pbrook
                p->region_offset = 0;
2342 db7b5426 blueswir1
            } else {
2343 db7b5426 blueswir1
                p->phys_offset = phys_offset;
2344 db7b5426 blueswir1
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2345 db7b5426 blueswir1
                    (phys_offset & IO_MEM_ROMD))
2346 db7b5426 blueswir1
                    phys_offset += TARGET_PAGE_SIZE;
2347 db7b5426 blueswir1
            }
2348 db7b5426 blueswir1
        } else {
2349 db7b5426 blueswir1
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2350 db7b5426 blueswir1
            p->phys_offset = phys_offset;
2351 8da3ff18 pbrook
            p->region_offset = region_offset;
2352 db7b5426 blueswir1
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2353 8da3ff18 pbrook
                (phys_offset & IO_MEM_ROMD)) {
2354 db7b5426 blueswir1
                phys_offset += TARGET_PAGE_SIZE;
2355 0e8f0967 pbrook
            } else {
2356 db7b5426 blueswir1
                target_phys_addr_t start_addr2, end_addr2;
2357 db7b5426 blueswir1
                int need_subpage = 0;
2358 db7b5426 blueswir1
2359 db7b5426 blueswir1
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2360 db7b5426 blueswir1
                              end_addr2, need_subpage);
2361 db7b5426 blueswir1
2362 4254fab8 blueswir1
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2363 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2364 8da3ff18 pbrook
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2365 67c4d23c pbrook
                                           addr & TARGET_PAGE_MASK);
2366 db7b5426 blueswir1
                    subpage_register(subpage, start_addr2, end_addr2,
2367 8da3ff18 pbrook
                                     phys_offset, region_offset);
2368 8da3ff18 pbrook
                    p->region_offset = 0;
2369 db7b5426 blueswir1
                }
2370 db7b5426 blueswir1
            }
2371 db7b5426 blueswir1
        }
2372 8da3ff18 pbrook
        region_offset += TARGET_PAGE_SIZE;
2373 33417e70 bellard
    }
2374 3b46e624 ths
2375 9d42037b bellard
    /* since each CPU stores ram addresses in its TLB cache, we must
2376 9d42037b bellard
       reset the modified entries */
2377 9d42037b bellard
    /* XXX: slow ! */
2378 9d42037b bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2379 9d42037b bellard
        tlb_flush(env, 1);
2380 9d42037b bellard
    }
2381 33417e70 bellard
}
2382 33417e70 bellard
2383 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2384 00f82b8a aurel32
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2385 ba863458 bellard
{
2386 ba863458 bellard
    PhysPageDesc *p;
2387 ba863458 bellard
2388 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2389 ba863458 bellard
    if (!p)
2390 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2391 ba863458 bellard
    return p->phys_offset;
2392 ba863458 bellard
}
2393 ba863458 bellard
2394 f65ed4c1 aliguori
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2395 f65ed4c1 aliguori
{
2396 f65ed4c1 aliguori
    if (kvm_enabled())
2397 f65ed4c1 aliguori
        kvm_coalesce_mmio_region(addr, size);
2398 f65ed4c1 aliguori
}
2399 f65ed4c1 aliguori
2400 f65ed4c1 aliguori
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2401 f65ed4c1 aliguori
{
2402 f65ed4c1 aliguori
    if (kvm_enabled())
2403 f65ed4c1 aliguori
        kvm_uncoalesce_mmio_region(addr, size);
2404 f65ed4c1 aliguori
}
2405 f65ed4c1 aliguori
2406 94a6b54f pbrook
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2407 94a6b54f pbrook
{
2408 94a6b54f pbrook
    RAMBlock *new_block;
2409 94a6b54f pbrook
2410 94a6b54f pbrook
    size = TARGET_PAGE_ALIGN(size);
2411 94a6b54f pbrook
    new_block = qemu_malloc(sizeof(*new_block));
2412 94a6b54f pbrook
2413 94a6b54f pbrook
    new_block->host = qemu_vmalloc(size);
2414 94a6b54f pbrook
    new_block->offset = last_ram_offset;
2415 94a6b54f pbrook
    new_block->length = size;
2416 94a6b54f pbrook
2417 94a6b54f pbrook
    new_block->next = ram_blocks;
2418 94a6b54f pbrook
    ram_blocks = new_block;
2419 94a6b54f pbrook
2420 94a6b54f pbrook
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2421 94a6b54f pbrook
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2422 94a6b54f pbrook
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2423 94a6b54f pbrook
           0xff, size >> TARGET_PAGE_BITS);
2424 94a6b54f pbrook
2425 94a6b54f pbrook
    last_ram_offset += size;
2426 94a6b54f pbrook
2427 6f0437e8 Jan Kiszka
    if (kvm_enabled())
2428 6f0437e8 Jan Kiszka
        kvm_setup_guest_memory(new_block->host, size);
2429 6f0437e8 Jan Kiszka
2430 94a6b54f pbrook
    return new_block->offset;
2431 94a6b54f pbrook
}
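/* Illustrative sketch (an assumption about typical callers, not from this
   file): board code usually allocates guest RAM here and then maps it with
   cpu_register_physical_memory(), e.g. for 128 MB of main memory:

       ram_addr_t ram_offset = qemu_ram_alloc(128 * 1024 * 1024);
       cpu_register_physical_memory(0, 128 * 1024 * 1024,
                                    ram_offset | IO_MEM_RAM);

   where IO_MEM_RAM marks the range as ordinary RAM. */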
2432 e9a1ab19 bellard
2433 e9a1ab19 bellard
void qemu_ram_free(ram_addr_t addr)
2434 e9a1ab19 bellard
{
2435 94a6b54f pbrook
    /* TODO: implement this.  */
2436 e9a1ab19 bellard
}
2437 e9a1ab19 bellard
2438 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2439 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2440 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
2441 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
2442 5579c7f3 pbrook

2443 5579c7f3 pbrook
   It should not be used for general purpose DMA.
2444 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2445 5579c7f3 pbrook
 */
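/* Illustrative sketch (hypothetical device state): a display adapter that
   allocated its video RAM with qemu_ram_alloc() can cache the host pointer
   once at init time and render directly from it:

       s->vram_offset = qemu_ram_alloc(vram_size);
       s->vram_ptr    = qemu_get_ram_ptr(s->vram_offset);
*/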
2446 dc828ca1 pbrook
void *qemu_get_ram_ptr(ram_addr_t addr)
2447 dc828ca1 pbrook
{
2448 94a6b54f pbrook
    RAMBlock *prev;
2449 94a6b54f pbrook
    RAMBlock **prevp;
2450 94a6b54f pbrook
    RAMBlock *block;
2451 94a6b54f pbrook
2452 94a6b54f pbrook
    prev = NULL;
2453 94a6b54f pbrook
    prevp = &ram_blocks;
2454 94a6b54f pbrook
    block = ram_blocks;
2455 94a6b54f pbrook
    while (block && (block->offset > addr
2456 94a6b54f pbrook
                     || block->offset + block->length <= addr)) {
2457 94a6b54f pbrook
        if (prev)
2458 94a6b54f pbrook
          prevp = &prev->next;
2459 94a6b54f pbrook
        prev = block;
2460 94a6b54f pbrook
        block = block->next;
2461 94a6b54f pbrook
    }
2462 94a6b54f pbrook
    if (!block) {
2463 94a6b54f pbrook
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2464 94a6b54f pbrook
        abort();
2465 94a6b54f pbrook
    }
2466 94a6b54f pbrook
    /* Move this entry to the start of the list.  */
2467 94a6b54f pbrook
    if (prev) {
2468 94a6b54f pbrook
        prev->next = block->next;
2469 94a6b54f pbrook
        block->next = *prevp;
2470 94a6b54f pbrook
        *prevp = block;
2471 94a6b54f pbrook
    }
2472 94a6b54f pbrook
    return block->host + (addr - block->offset);
2473 dc828ca1 pbrook
}
2474 dc828ca1 pbrook
2475 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
2476 5579c7f3 pbrook
   (typically a TLB entry) back to a ram offset.  */
2477 5579c7f3 pbrook
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2478 5579c7f3 pbrook
{
2479 94a6b54f pbrook
    RAMBlock *prev;
2480 94a6b54f pbrook
    RAMBlock **prevp;
2481 94a6b54f pbrook
    RAMBlock *block;
2482 94a6b54f pbrook
    uint8_t *host = ptr;
2483 94a6b54f pbrook
2484 94a6b54f pbrook
    prev = NULL;
2485 94a6b54f pbrook
    prevp = &ram_blocks;
2486 94a6b54f pbrook
    block = ram_blocks;
2487 94a6b54f pbrook
    while (block && (block->host > host
2488 94a6b54f pbrook
                     || block->host + block->length <= host)) {
2489 94a6b54f pbrook
        if (prev)
2490 94a6b54f pbrook
          prevp = &prev->next;
2491 94a6b54f pbrook
        prev = block;
2492 94a6b54f pbrook
        block = block->next;
2493 94a6b54f pbrook
    }
2494 94a6b54f pbrook
    if (!block) {
2495 94a6b54f pbrook
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2496 94a6b54f pbrook
        abort();
2497 94a6b54f pbrook
    }
2498 94a6b54f pbrook
    return block->offset + (host - block->host);
2499 5579c7f3 pbrook
}
2500 5579c7f3 pbrook
2501 a4193c8a bellard
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2502 33417e70 bellard
{
2503 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2504 ab3d1727 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2505 67d3b957 pbrook
#endif
2506 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2507 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 1);
2508 e18231a3 blueswir1
#endif
2509 e18231a3 blueswir1
    return 0;
2510 e18231a3 blueswir1
}
2511 e18231a3 blueswir1
2512 e18231a3 blueswir1
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2513 e18231a3 blueswir1
{
2514 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2515 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2516 e18231a3 blueswir1
#endif
2517 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2518 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 2);
2519 e18231a3 blueswir1
#endif
2520 e18231a3 blueswir1
    return 0;
2521 e18231a3 blueswir1
}
2522 e18231a3 blueswir1
2523 e18231a3 blueswir1
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2524 e18231a3 blueswir1
{
2525 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2526 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2527 e18231a3 blueswir1
#endif
2528 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2529 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 4);
2530 b4f0a316 blueswir1
#endif
2531 33417e70 bellard
    return 0;
2532 33417e70 bellard
}
2533 33417e70 bellard
2534 a4193c8a bellard
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2535 33417e70 bellard
{
2536 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2537 ab3d1727 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2538 67d3b957 pbrook
#endif
2539 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2540 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 1);
2541 e18231a3 blueswir1
#endif
2542 e18231a3 blueswir1
}
2543 e18231a3 blueswir1
2544 e18231a3 blueswir1
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2545 e18231a3 blueswir1
{
2546 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2547 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2548 e18231a3 blueswir1
#endif
2549 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2550 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 2);
2551 e18231a3 blueswir1
#endif
2552 e18231a3 blueswir1
}
2553 e18231a3 blueswir1
2554 e18231a3 blueswir1
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2555 e18231a3 blueswir1
{
2556 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2557 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2558 e18231a3 blueswir1
#endif
2559 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2560 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 4);
2561 b4f0a316 blueswir1
#endif
2562 33417e70 bellard
}
2563 33417e70 bellard
2564 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2565 33417e70 bellard
    unassigned_mem_readb,
2566 e18231a3 blueswir1
    unassigned_mem_readw,
2567 e18231a3 blueswir1
    unassigned_mem_readl,
2568 33417e70 bellard
};
2569 33417e70 bellard
2570 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2571 33417e70 bellard
    unassigned_mem_writeb,
2572 e18231a3 blueswir1
    unassigned_mem_writew,
2573 e18231a3 blueswir1
    unassigned_mem_writel,
2574 33417e70 bellard
};
2575 33417e70 bellard
2576 0f459d16 pbrook
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2577 0f459d16 pbrook
                                uint32_t val)
2578 9fa3e853 bellard
{
2579 3a7d929e bellard
    int dirty_flags;
2580 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2581 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2582 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2583 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 1);
2584 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2585 9fa3e853 bellard
#endif
2586 3a7d929e bellard
    }
2587 5579c7f3 pbrook
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2588 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2589 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2590 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2591 f23db169 bellard
       flushed */
2592 f23db169 bellard
    if (dirty_flags == 0xff)
2593 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2594 9fa3e853 bellard
}
2595 9fa3e853 bellard
2596 0f459d16 pbrook
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2597 0f459d16 pbrook
                                uint32_t val)
2598 9fa3e853 bellard
{
2599 3a7d929e bellard
    int dirty_flags;
2600 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2601 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2602 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2603 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 2);
2604 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2605 9fa3e853 bellard
#endif
2606 3a7d929e bellard
    }
2607 5579c7f3 pbrook
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2608 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2609 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2610 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2611 f23db169 bellard
       flushed */
2612 f23db169 bellard
    if (dirty_flags == 0xff)
2613 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2614 9fa3e853 bellard
}
2615 9fa3e853 bellard
2616 0f459d16 pbrook
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2617 0f459d16 pbrook
                                uint32_t val)
2618 9fa3e853 bellard
{
2619 3a7d929e bellard
    int dirty_flags;
2620 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2621 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2622 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2623 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
2624 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2625 9fa3e853 bellard
#endif
2626 3a7d929e bellard
    }
2627 5579c7f3 pbrook
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2628 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2629 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2630 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2631 f23db169 bellard
       flushed */
2632 f23db169 bellard
    if (dirty_flags == 0xff)
2633 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2634 9fa3e853 bellard
}
2635 9fa3e853 bellard
2636 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const error_mem_read[3] = {
2637 9fa3e853 bellard
    NULL, /* never used */
2638 9fa3e853 bellard
    NULL, /* never used */
2639 9fa3e853 bellard
    NULL, /* never used */
2640 9fa3e853 bellard
};
2641 9fa3e853 bellard
2642 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2643 1ccde1cb bellard
    notdirty_mem_writeb,
2644 1ccde1cb bellard
    notdirty_mem_writew,
2645 1ccde1cb bellard
    notdirty_mem_writel,
2646 1ccde1cb bellard
};
2647 1ccde1cb bellard
2648 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
2649 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
2650 0f459d16 pbrook
{
2651 0f459d16 pbrook
    CPUState *env = cpu_single_env;
2652 06d55cc1 aliguori
    target_ulong pc, cs_base;
2653 06d55cc1 aliguori
    TranslationBlock *tb;
2654 0f459d16 pbrook
    target_ulong vaddr;
2655 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2656 06d55cc1 aliguori
    int cpu_flags;
2657 0f459d16 pbrook
2658 06d55cc1 aliguori
    if (env->watchpoint_hit) {
2659 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
2660 06d55cc1 aliguori
         * the debug interrupt so that it will trigger after the
2661 06d55cc1 aliguori
         * current instruction. */
2662 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2663 06d55cc1 aliguori
        return;
2664 06d55cc1 aliguori
    }
2665 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2666 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2667 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
2668 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2669 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
2670 6e140f28 aliguori
            if (!env->watchpoint_hit) {
2671 6e140f28 aliguori
                env->watchpoint_hit = wp;
2672 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
2673 6e140f28 aliguori
                if (!tb) {
2674 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2675 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
2676 6e140f28 aliguori
                }
2677 6e140f28 aliguori
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2678 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
2679 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2680 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
2681 6e140f28 aliguori
                } else {
2682 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2683 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2684 6e140f28 aliguori
                }
2685 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
2686 06d55cc1 aliguori
            }
2687 6e140f28 aliguori
        } else {
2688 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
2689 0f459d16 pbrook
        }
2690 0f459d16 pbrook
    }
2691 0f459d16 pbrook
}
2692 0f459d16 pbrook
2693 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2694 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
2695 6658ffb8 pbrook
   phys routines.  */
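/* Illustrative sketch (assuming the cpu_watchpoint_insert() helper and
   hypothetical 'env'/'vaddr'): a debugger front end can trap writes to a
   4-byte guest variable with

       cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, NULL);

   after which any write hitting that address goes through the watch_mem_*
   handlers below and raises a debug exception via check_watchpoint(). */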
2696 6658ffb8 pbrook
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2697 6658ffb8 pbrook
{
2698 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2699 6658ffb8 pbrook
    return ldub_phys(addr);
2700 6658ffb8 pbrook
}
2701 6658ffb8 pbrook
2702 6658ffb8 pbrook
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2703 6658ffb8 pbrook
{
2704 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2705 6658ffb8 pbrook
    return lduw_phys(addr);
2706 6658ffb8 pbrook
}
2707 6658ffb8 pbrook
2708 6658ffb8 pbrook
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2709 6658ffb8 pbrook
{
2710 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2711 6658ffb8 pbrook
    return ldl_phys(addr);
2712 6658ffb8 pbrook
}
2713 6658ffb8 pbrook
2714 6658ffb8 pbrook
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2715 6658ffb8 pbrook
                             uint32_t val)
2716 6658ffb8 pbrook
{
2717 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2718 6658ffb8 pbrook
    stb_phys(addr, val);
2719 6658ffb8 pbrook
}
2720 6658ffb8 pbrook
2721 6658ffb8 pbrook
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2722 6658ffb8 pbrook
                             uint32_t val)
2723 6658ffb8 pbrook
{
2724 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2725 6658ffb8 pbrook
    stw_phys(addr, val);
2726 6658ffb8 pbrook
}
2727 6658ffb8 pbrook
2728 6658ffb8 pbrook
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2729 6658ffb8 pbrook
                             uint32_t val)
2730 6658ffb8 pbrook
{
2731 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2732 6658ffb8 pbrook
    stl_phys(addr, val);
2733 6658ffb8 pbrook
}
2734 6658ffb8 pbrook
2735 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const watch_mem_read[3] = {
2736 6658ffb8 pbrook
    watch_mem_readb,
2737 6658ffb8 pbrook
    watch_mem_readw,
2738 6658ffb8 pbrook
    watch_mem_readl,
2739 6658ffb8 pbrook
};
2740 6658ffb8 pbrook
2741 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2742 6658ffb8 pbrook
    watch_mem_writeb,
2743 6658ffb8 pbrook
    watch_mem_writew,
2744 6658ffb8 pbrook
    watch_mem_writel,
2745 6658ffb8 pbrook
};
2746 6658ffb8 pbrook
2747 db7b5426 blueswir1
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2748 db7b5426 blueswir1
                                 unsigned int len)
2749 db7b5426 blueswir1
{
2750 db7b5426 blueswir1
    uint32_t ret;
2751 db7b5426 blueswir1
    unsigned int idx;
2752 db7b5426 blueswir1
2753 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2754 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2755 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2756 db7b5426 blueswir1
           mmio, len, addr, idx);
2757 db7b5426 blueswir1
#endif
2758 8da3ff18 pbrook
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2759 8da3ff18 pbrook
                                       addr + mmio->region_offset[idx][0][len]);
2760 db7b5426 blueswir1
2761 db7b5426 blueswir1
    return ret;
2762 db7b5426 blueswir1
}
2763 db7b5426 blueswir1
2764 db7b5426 blueswir1
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2765 db7b5426 blueswir1
                              uint32_t value, unsigned int len)
2766 db7b5426 blueswir1
{
2767 db7b5426 blueswir1
    unsigned int idx;
2768 db7b5426 blueswir1
2769 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2770 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2771 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2772 db7b5426 blueswir1
           mmio, len, addr, idx, value);
2773 db7b5426 blueswir1
#endif
2774 8da3ff18 pbrook
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2775 8da3ff18 pbrook
                                  addr + mmio->region_offset[idx][1][len],
2776 8da3ff18 pbrook
                                  value);
2777 db7b5426 blueswir1
}
2778 db7b5426 blueswir1
2779 db7b5426 blueswir1
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2780 db7b5426 blueswir1
{
2781 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2782 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2783 db7b5426 blueswir1
#endif
2784 db7b5426 blueswir1
2785 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 0);
2786 db7b5426 blueswir1
}
2787 db7b5426 blueswir1
2788 db7b5426 blueswir1
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2789 db7b5426 blueswir1
                            uint32_t value)
2790 db7b5426 blueswir1
{
2791 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2792 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2793 db7b5426 blueswir1
#endif
2794 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
2795 db7b5426 blueswir1
}
2796 db7b5426 blueswir1
2797 db7b5426 blueswir1
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2798 db7b5426 blueswir1
{
2799 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2800 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2801 db7b5426 blueswir1
#endif
2802 db7b5426 blueswir1
2803 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 1);
2804 db7b5426 blueswir1
}
2805 db7b5426 blueswir1
2806 db7b5426 blueswir1
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2807 db7b5426 blueswir1
                            uint32_t value)
2808 db7b5426 blueswir1
{
2809 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2810 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2811 db7b5426 blueswir1
#endif
2812 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
2813 db7b5426 blueswir1
}
2814 db7b5426 blueswir1
2815 db7b5426 blueswir1
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2816 db7b5426 blueswir1
{
2817 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2818 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2819 db7b5426 blueswir1
#endif
2820 db7b5426 blueswir1
2821 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 2);
2822 db7b5426 blueswir1
}
2823 db7b5426 blueswir1
2824 db7b5426 blueswir1
static void subpage_writel (void *opaque,
2825 db7b5426 blueswir1
                         target_phys_addr_t addr, uint32_t value)
2826 db7b5426 blueswir1
{
2827 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2828 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2829 db7b5426 blueswir1
#endif
2830 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
2831 db7b5426 blueswir1
}
2832 db7b5426 blueswir1
2833 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const subpage_read[] = {
2834 db7b5426 blueswir1
    &subpage_readb,
2835 db7b5426 blueswir1
    &subpage_readw,
2836 db7b5426 blueswir1
    &subpage_readl,
2837 db7b5426 blueswir1
};
2838 db7b5426 blueswir1
2839 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const subpage_write[] = {
2840 db7b5426 blueswir1
    &subpage_writeb,
2841 db7b5426 blueswir1
    &subpage_writew,
2842 db7b5426 blueswir1
    &subpage_writel,
2843 db7b5426 blueswir1
};
2844 db7b5426 blueswir1
2845 db7b5426 blueswir1
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2846 8da3ff18 pbrook
                             ram_addr_t memory, ram_addr_t region_offset)
2847 db7b5426 blueswir1
{
2848 db7b5426 blueswir1
    int idx, eidx;
2849 4254fab8 blueswir1
    unsigned int i;
2850 db7b5426 blueswir1
2851 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2852 db7b5426 blueswir1
        return -1;
2853 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
2854 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
2855 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2856 0bf9e31a Blue Swirl
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2857 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
2858 db7b5426 blueswir1
#endif
2859 db7b5426 blueswir1
    memory >>= IO_MEM_SHIFT;
2860 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
2861 4254fab8 blueswir1
        for (i = 0; i < 4; i++) {
2862 3ee89922 blueswir1
            if (io_mem_read[memory][i]) {
2863 3ee89922 blueswir1
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2864 3ee89922 blueswir1
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2865 8da3ff18 pbrook
                mmio->region_offset[idx][0][i] = region_offset;
2866 3ee89922 blueswir1
            }
2867 3ee89922 blueswir1
            if (io_mem_write[memory][i]) {
2868 3ee89922 blueswir1
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2869 3ee89922 blueswir1
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2870 8da3ff18 pbrook
                mmio->region_offset[idx][1][i] = region_offset;
2871 3ee89922 blueswir1
            }
2872 4254fab8 blueswir1
        }
2873 db7b5426 blueswir1
    }
2874 db7b5426 blueswir1
2875 db7b5426 blueswir1
    return 0;
2876 db7b5426 blueswir1
}
2877 db7b5426 blueswir1
2878 00f82b8a aurel32
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2879 8da3ff18 pbrook
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2880 db7b5426 blueswir1
{
2881 db7b5426 blueswir1
    subpage_t *mmio;
2882 db7b5426 blueswir1
    int subpage_memory;
2883 db7b5426 blueswir1
2884 db7b5426 blueswir1
    mmio = qemu_mallocz(sizeof(subpage_t));
2885 1eec614b aliguori
2886 1eec614b aliguori
    mmio->base = base;
2887 1eed09cb Avi Kivity
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2888 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2889 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2890 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2891 db7b5426 blueswir1
#endif
2892 1eec614b aliguori
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2893 1eec614b aliguori
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2894 8da3ff18 pbrook
                         region_offset);
2895 db7b5426 blueswir1
2896 db7b5426 blueswir1
    return mmio;
2897 db7b5426 blueswir1
}
2898 db7b5426 blueswir1
2899 88715657 aliguori
static int get_free_io_mem_idx(void)
2900 88715657 aliguori
{
2901 88715657 aliguori
    int i;
2902 88715657 aliguori
2903 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2904 88715657 aliguori
        if (!io_mem_used[i]) {
2905 88715657 aliguori
            io_mem_used[i] = 1;
2906 88715657 aliguori
            return i;
2907 88715657 aliguori
        }
2908 88715657 aliguori
2909 88715657 aliguori
    return -1;
2910 88715657 aliguori
}
2911 88715657 aliguori
2912 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
2913 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
2914 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
2915 3ee89922 blueswir1
   If io_index is non zero, the corresponding io zone is
2916 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
2917 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
2918 4254fab8 blueswir1
   returned on error. */
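/* Illustrative sketch (hypothetical device, not part of this file): a device
   model builds the per-width callback tables, registers them, and then maps
   the returned value over a page of guest physical address space:

       static CPUReadMemoryFunc * const mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc * const mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       io = cpu_register_io_memory(mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
*/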
2919 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
2920 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
2921 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
2922 1eed09cb Avi Kivity
                                        void *opaque)
2923 33417e70 bellard
{
2924 4254fab8 blueswir1
    int i, subwidth = 0;
2925 33417e70 bellard
2926 33417e70 bellard
    if (io_index <= 0) {
2927 88715657 aliguori
        io_index = get_free_io_mem_idx();
2928 88715657 aliguori
        if (io_index == -1)
2929 88715657 aliguori
            return io_index;
2930 33417e70 bellard
    } else {
2931 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
2932 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
2933 33417e70 bellard
            return -1;
2934 33417e70 bellard
    }
2935 b5ff1b31 bellard
2936 33417e70 bellard
    for(i = 0;i < 3; i++) {
2937 4254fab8 blueswir1
        if (!mem_read[i] || !mem_write[i])
2938 4254fab8 blueswir1
            subwidth = IO_MEM_SUBWIDTH;
2939 33417e70 bellard
        io_mem_read[io_index][i] = mem_read[i];
2940 33417e70 bellard
        io_mem_write[io_index][i] = mem_write[i];
2941 33417e70 bellard
    }
2942 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
2943 4254fab8 blueswir1
    return (io_index << IO_MEM_SHIFT) | subwidth;
2944 33417e70 bellard
}
2945 61382a50 bellard
2946 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2947 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
2948 1eed09cb Avi Kivity
                           void *opaque)
2949 1eed09cb Avi Kivity
{
2950 1eed09cb Avi Kivity
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2951 1eed09cb Avi Kivity
}
2952 1eed09cb Avi Kivity
2953 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
2954 88715657 aliguori
{
2955 88715657 aliguori
    int i;
2956 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
2957 88715657 aliguori
2958 88715657 aliguori
    for (i=0;i < 3; i++) {
2959 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2960 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2961 88715657 aliguori
    }
2962 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
2963 88715657 aliguori
    io_mem_used[io_index] = 0;
2964 88715657 aliguori
}
2965 88715657 aliguori
2966 e9179ce1 Avi Kivity
static void io_mem_init(void)
2967 e9179ce1 Avi Kivity
{
2968 e9179ce1 Avi Kivity
    int i;
2969 e9179ce1 Avi Kivity
2970 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
2971 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
2972 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
2973 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
2974 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
2975 e9179ce1 Avi Kivity
2976 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
2977 e9179ce1 Avi Kivity
                                          watch_mem_write, NULL);
2978 e9179ce1 Avi Kivity
}
2979 e9179ce1 Avi Kivity
2980 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
2981 e2eef170 pbrook
2982 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
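/* Illustrative sketch (hypothetical names): a device model can read a
   16-byte DMA descriptor from guest memory and later write back a status
   byte:

       uint8_t desc[16], status;
       cpu_physical_memory_rw(desc_addr, desc, sizeof(desc), 0);
       cpu_physical_memory_rw(status_addr, &status, 1, 1);

   where is_write is 0 for the read and 1 for the write. */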
2983 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
2984 5fafdf24 ths
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2985 13eb76e0 bellard
                            int len, int is_write)
2986 13eb76e0 bellard
{
2987 13eb76e0 bellard
    int l, flags;
2988 13eb76e0 bellard
    target_ulong page;
2989 53a5960a pbrook
    void * p;
2990 13eb76e0 bellard
2991 13eb76e0 bellard
    while (len > 0) {
2992 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
2993 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
2994 13eb76e0 bellard
        if (l > len)
2995 13eb76e0 bellard
            l = len;
2996 13eb76e0 bellard
        flags = page_get_flags(page);
2997 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
2998 13eb76e0 bellard
            return;
2999 13eb76e0 bellard
        if (is_write) {
3000 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3001 13eb76e0 bellard
                return;
3002 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3003 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3004 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
3005 579a97f7 bellard
                return;
3006 72fb7daa aurel32
            memcpy(p, buf, l);
3007 72fb7daa aurel32
            unlock_user(p, addr, l);
3008 13eb76e0 bellard
        } else {
3009 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3010 13eb76e0 bellard
                return;
3011 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3012 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3013 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
3014 579a97f7 bellard
                return;
3015 72fb7daa aurel32
            memcpy(buf, p, l);
3016 5b257578 aurel32
            unlock_user(p, addr, 0);
3017 13eb76e0 bellard
        }
3018 13eb76e0 bellard
        len -= l;
3019 13eb76e0 bellard
        buf += l;
3020 13eb76e0 bellard
        addr += l;
3021 13eb76e0 bellard
    }
3022 13eb76e0 bellard
}
3023 8df1cd07 bellard
3024 13eb76e0 bellard
#else
3025 5fafdf24 ths
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3026 13eb76e0 bellard
                            int len, int is_write)
3027 13eb76e0 bellard
{
3028 13eb76e0 bellard
    int l, io_index;
3029 13eb76e0 bellard
    uint8_t *ptr;
3030 13eb76e0 bellard
    uint32_t val;
3031 2e12669a bellard
    target_phys_addr_t page;
3032 2e12669a bellard
    unsigned long pd;
3033 92e873b9 bellard
    PhysPageDesc *p;
3034 3b46e624 ths
3035 13eb76e0 bellard
    while (len > 0) {
3036 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3037 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3038 13eb76e0 bellard
        if (l > len)
3039 13eb76e0 bellard
            l = len;
3040 92e873b9 bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3041 13eb76e0 bellard
        if (!p) {
3042 13eb76e0 bellard
            pd = IO_MEM_UNASSIGNED;
3043 13eb76e0 bellard
        } else {
3044 13eb76e0 bellard
            pd = p->phys_offset;
3045 13eb76e0 bellard
        }
3046 3b46e624 ths
3047 13eb76e0 bellard
        if (is_write) {
3048 3a7d929e bellard
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3049 6c2934db aurel32
                target_phys_addr_t addr1 = addr;
3050 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3051 8da3ff18 pbrook
                if (p)
3052 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3053 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
3054 6a00d601 bellard
                   potential bugs */
3055 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3056 1c213d19 bellard
                    /* 32 bit write access */
3057 c27004ec bellard
                    val = ldl_p(buf);
3058 6c2934db aurel32
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3059 13eb76e0 bellard
                    l = 4;
3060 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3061 1c213d19 bellard
                    /* 16 bit write access */
3062 c27004ec bellard
                    val = lduw_p(buf);
3063 6c2934db aurel32
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3064 13eb76e0 bellard
                    l = 2;
3065 13eb76e0 bellard
                } else {
3066 1c213d19 bellard
                    /* 8 bit write access */
3067 c27004ec bellard
                    val = ldub_p(buf);
3068 6c2934db aurel32
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3069 13eb76e0 bellard
                    l = 1;
3070 13eb76e0 bellard
                }
3071 13eb76e0 bellard
            } else {
3072 b448f2f3 bellard
                unsigned long addr1;
3073 b448f2f3 bellard
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3074 13eb76e0 bellard
                /* RAM case */
3075 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
3076 13eb76e0 bellard
                memcpy(ptr, buf, l);
3077 3a7d929e bellard
                if (!cpu_physical_memory_is_dirty(addr1)) {
3078 3a7d929e bellard
                    /* invalidate code */
3079 3a7d929e bellard
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3080 3a7d929e bellard
                    /* set dirty bit */
3081 5fafdf24 ths
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3082 f23db169 bellard
                        (0xff & ~CODE_DIRTY_FLAG);
3083 3a7d929e bellard
                }
3084 13eb76e0 bellard
            }
3085 13eb76e0 bellard
        } else {
3086 5fafdf24 ths
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3087 2a4188a3 bellard
                !(pd & IO_MEM_ROMD)) {
3088 6c2934db aurel32
                target_phys_addr_t addr1 = addr;
3089 13eb76e0 bellard
                /* I/O case */
3090 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3091 8da3ff18 pbrook
                if (p)
3092 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3093 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3094 13eb76e0 bellard
                    /* 32 bit read access */
3095 6c2934db aurel32
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3096 c27004ec bellard
                    stl_p(buf, val);
3097 13eb76e0 bellard
                    l = 4;
3098 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3099 13eb76e0 bellard
                    /* 16 bit read access */
3100 6c2934db aurel32
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3101 c27004ec bellard
                    stw_p(buf, val);
3102 13eb76e0 bellard
                    l = 2;
3103 13eb76e0 bellard
                } else {
3104 1c213d19 bellard
                    /* 8 bit read access */
3105 6c2934db aurel32
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3106 c27004ec bellard
                    stb_p(buf, val);
3107 13eb76e0 bellard
                    l = 1;
3108 13eb76e0 bellard
                }
3109 13eb76e0 bellard
            } else {
3110 13eb76e0 bellard
                /* RAM case */
3111 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3112 13eb76e0 bellard
                    (addr & ~TARGET_PAGE_MASK);
3113 13eb76e0 bellard
                memcpy(buf, ptr, l);
3114 13eb76e0 bellard
            }
3115 13eb76e0 bellard
        }
3116 13eb76e0 bellard
        len -= l;
3117 13eb76e0 bellard
        buf += l;
3118 13eb76e0 bellard
        addr += l;
3119 13eb76e0 bellard
    }
3120 13eb76e0 bellard
}
3121 8df1cd07 bellard
3122 d0ecd2aa bellard
/* used for ROM loading: can write to RAM and ROM */
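/* Illustrative sketch (hypothetical names): firmware loaders use this to
   copy a blob into a region that may be registered as ROM, which
   cpu_physical_memory_rw() would not modify:

       cpu_physical_memory_write_rom(rom_base, blob, blob_size);
*/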
3123 5fafdf24 ths
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3124 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3125 d0ecd2aa bellard
{
3126 d0ecd2aa bellard
    int l;
3127 d0ecd2aa bellard
    uint8_t *ptr;
3128 d0ecd2aa bellard
    target_phys_addr_t page;
3129 d0ecd2aa bellard
    unsigned long pd;
3130 d0ecd2aa bellard
    PhysPageDesc *p;
3131 3b46e624 ths
3132 d0ecd2aa bellard
    while (len > 0) {
3133 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3134 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3135 d0ecd2aa bellard
        if (l > len)
3136 d0ecd2aa bellard
            l = len;
3137 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3138 d0ecd2aa bellard
        if (!p) {
3139 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3140 d0ecd2aa bellard
        } else {
3141 d0ecd2aa bellard
            pd = p->phys_offset;
3142 d0ecd2aa bellard
        }
3143 3b46e624 ths
3144 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3145 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3146 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3147 d0ecd2aa bellard
            /* do nothing */
3148 d0ecd2aa bellard
        } else {
3149 d0ecd2aa bellard
            unsigned long addr1;
3150 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3151 d0ecd2aa bellard
            /* ROM/RAM case */
3152 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3153 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3154 d0ecd2aa bellard
        }
3155 d0ecd2aa bellard
        len -= l;
3156 d0ecd2aa bellard
        buf += l;
3157 d0ecd2aa bellard
        addr += l;
3158 d0ecd2aa bellard
    }
3159 d0ecd2aa bellard
}
3160 d0ecd2aa bellard
3161 6d16c2f8 aliguori
typedef struct {
3162 6d16c2f8 aliguori
    void *buffer;
3163 6d16c2f8 aliguori
    target_phys_addr_t addr;
3164 6d16c2f8 aliguori
    target_phys_addr_t len;
3165 6d16c2f8 aliguori
} BounceBuffer;
3166 6d16c2f8 aliguori
3167 6d16c2f8 aliguori
static BounceBuffer bounce;
3168 6d16c2f8 aliguori
3169 ba223c29 aliguori
typedef struct MapClient {
3170 ba223c29 aliguori
    void *opaque;
3171 ba223c29 aliguori
    void (*callback)(void *opaque);
3172 72cf2d4f Blue Swirl
    QLIST_ENTRY(MapClient) link;
3173 ba223c29 aliguori
} MapClient;
3174 ba223c29 aliguori
3175 72cf2d4f Blue Swirl
static QLIST_HEAD(map_client_list, MapClient) map_client_list
3176 72cf2d4f Blue Swirl
    = QLIST_HEAD_INITIALIZER(map_client_list);
3177 ba223c29 aliguori
3178 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3179 ba223c29 aliguori
{
3180 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3181 ba223c29 aliguori
3182 ba223c29 aliguori
    client->opaque = opaque;
3183 ba223c29 aliguori
    client->callback = callback;
3184 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3185 ba223c29 aliguori
    return client;
3186 ba223c29 aliguori
}
3187 ba223c29 aliguori
3188 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3189 ba223c29 aliguori
{
3190 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3191 ba223c29 aliguori
3192 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3193 34d5e948 Isaku Yamahata
    qemu_free(client);
3194 ba223c29 aliguori
}
3195 ba223c29 aliguori
3196 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3197 ba223c29 aliguori
{
3198 ba223c29 aliguori
    MapClient *client;
3199 ba223c29 aliguori
3200 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3201 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3202 ba223c29 aliguori
        client->callback(client->opaque);
3203 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3204 ba223c29 aliguori
    }
3205 ba223c29 aliguori
}
3206 ba223c29 aliguori
3207 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3208 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3209 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3210 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3211 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
3212 ba223c29 aliguori
 * likely to succeed.
3213 6d16c2f8 aliguori
 */
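/* Illustrative sketch (hypothetical names): zero-copy DMA into guest memory
   maps the region, copies, then unmaps with the length actually written;
   when no mapping resources are left, a map client is registered to retry:

       target_phys_addr_t plen = len;
       uint8_t *host = cpu_physical_memory_map(gpa, &plen, 1);
       if (host) {
           memcpy(host, data, plen);
           cpu_physical_memory_unmap(host, plen, 1, plen);
       } else {
           cpu_register_map_client(s, mydev_retry_dma);
       }
*/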
3214 6d16c2f8 aliguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
3215 6d16c2f8 aliguori
                              target_phys_addr_t *plen,
3216 6d16c2f8 aliguori
                              int is_write)
3217 6d16c2f8 aliguori
{
3218 6d16c2f8 aliguori
    target_phys_addr_t len = *plen;
3219 6d16c2f8 aliguori
    target_phys_addr_t done = 0;
3220 6d16c2f8 aliguori
    int l;
3221 6d16c2f8 aliguori
    uint8_t *ret = NULL;
3222 6d16c2f8 aliguori
    uint8_t *ptr;
3223 6d16c2f8 aliguori
    target_phys_addr_t page;
3224 6d16c2f8 aliguori
    unsigned long pd;
3225 6d16c2f8 aliguori
    PhysPageDesc *p;
3226 6d16c2f8 aliguori
    unsigned long addr1;
3227 6d16c2f8 aliguori
3228 6d16c2f8 aliguori
    while (len > 0) {
3229 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
3230 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
3231 6d16c2f8 aliguori
        if (l > len)
3232 6d16c2f8 aliguori
            l = len;
3233 6d16c2f8 aliguori
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3234 6d16c2f8 aliguori
        if (!p) {
3235 6d16c2f8 aliguori
            pd = IO_MEM_UNASSIGNED;
3236 6d16c2f8 aliguori
        } else {
3237 6d16c2f8 aliguori
            pd = p->phys_offset;
3238 6d16c2f8 aliguori
        }
3239 6d16c2f8 aliguori
3240 6d16c2f8 aliguori
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3241 6d16c2f8 aliguori
            if (done || bounce.buffer) {
3242 6d16c2f8 aliguori
                break;
3243 6d16c2f8 aliguori
            }
3244 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3245 6d16c2f8 aliguori
            bounce.addr = addr;
3246 6d16c2f8 aliguori
            bounce.len = l;
3247 6d16c2f8 aliguori
            if (!is_write) {
3248 6d16c2f8 aliguori
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3249 6d16c2f8 aliguori
            }
3250 6d16c2f8 aliguori
            ptr = bounce.buffer;
3251 6d16c2f8 aliguori
        } else {
3252 6d16c2f8 aliguori
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3253 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3254 6d16c2f8 aliguori
        }
3255 6d16c2f8 aliguori
        if (!done) {
3256 6d16c2f8 aliguori
            ret = ptr;
3257 6d16c2f8 aliguori
        } else if (ret + done != ptr) {
3258 6d16c2f8 aliguori
            break;
3259 6d16c2f8 aliguori
        }
3260 6d16c2f8 aliguori
3261 6d16c2f8 aliguori
        len -= l;
3262 6d16c2f8 aliguori
        addr += l;
3263 6d16c2f8 aliguori
        done += l;
3264 6d16c2f8 aliguori
    }
3265 6d16c2f8 aliguori
    *plen = done;
3266 6d16c2f8 aliguori
    return ret;
3267 6d16c2f8 aliguori
}
3268 6d16c2f8 aliguori
3269 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3270 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3271 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
3272 6d16c2f8 aliguori
 */
3273 6d16c2f8 aliguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3274 6d16c2f8 aliguori
                               int is_write, target_phys_addr_t access_len)
3275 6d16c2f8 aliguori
{
3276 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
3277 6d16c2f8 aliguori
        if (is_write) {
3278 5579c7f3 pbrook
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3279 6d16c2f8 aliguori
            while (access_len) {
3280 6d16c2f8 aliguori
                unsigned l;
3281 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
3282 6d16c2f8 aliguori
                if (l > access_len)
3283 6d16c2f8 aliguori
                    l = access_len;
3284 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
3285 6d16c2f8 aliguori
                    /* invalidate code */
3286 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3287 6d16c2f8 aliguori
                    /* set dirty bit */
3288 6d16c2f8 aliguori
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3289 6d16c2f8 aliguori
                        (0xff & ~CODE_DIRTY_FLAG);
3290 6d16c2f8 aliguori
                }
3291 6d16c2f8 aliguori
                addr1 += l;
3292 6d16c2f8 aliguori
                access_len -= l;
3293 6d16c2f8 aliguori
            }
3294 6d16c2f8 aliguori
        }
3295 6d16c2f8 aliguori
        return;
3296 6d16c2f8 aliguori
    }
3297 6d16c2f8 aliguori
    if (is_write) {
3298 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3299 6d16c2f8 aliguori
    }
3300 6d16c2f8 aliguori
    qemu_free(bounce.buffer);
3301 6d16c2f8 aliguori
    bounce.buffer = NULL;
3302 ba223c29 aliguori
    cpu_notify_map_clients();
3303 6d16c2f8 aliguori
}
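/* Editor's note: an illustrative sketch, not part of the original file,
 * showing the intended calling pattern for the two functions above.  The
 * mapping may cover less than the requested length (returned in *plen),
 * e.g. when it falls back to the single bounce buffer, so the caller loops;
 * the length actually written is passed to the unmap so that only the
 * touched range is invalidated and marked dirty.  The function name is
 * hypothetical.  */
static void example_dma_write(target_phys_addr_t addr, const uint8_t *buf,
                              target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(addr, &plen, 1);

        if (!host) {
            /* Resources exhausted: see cpu_register_map_client() above.  */
            break;
        }
        memcpy(host, buf, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        addr += plen;
        buf += plen;
        len -= plen;
    }
}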
3304 d0ecd2aa bellard
3305 8df1cd07 bellard
/* warning: addr must be aligned */
3306 8df1cd07 bellard
uint32_t ldl_phys(target_phys_addr_t addr)
3307 8df1cd07 bellard
{
3308 8df1cd07 bellard
    int io_index;
3309 8df1cd07 bellard
    uint8_t *ptr;
3310 8df1cd07 bellard
    uint32_t val;
3311 8df1cd07 bellard
    unsigned long pd;
3312 8df1cd07 bellard
    PhysPageDesc *p;
3313 8df1cd07 bellard
3314 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3315 8df1cd07 bellard
    if (!p) {
3316 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3317 8df1cd07 bellard
    } else {
3318 8df1cd07 bellard
        pd = p->phys_offset;
3319 8df1cd07 bellard
    }
3320 3b46e624 ths
3321 5fafdf24 ths
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3322 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3323 8df1cd07 bellard
        /* I/O case */
3324 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3325 8da3ff18 pbrook
        if (p)
3326 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3327 8df1cd07 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3328 8df1cd07 bellard
    } else {
3329 8df1cd07 bellard
        /* RAM case */
3330 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3331 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
3332 8df1cd07 bellard
        val = ldl_p(ptr);
3333 8df1cd07 bellard
    }
3334 8df1cd07 bellard
    return val;
3335 8df1cd07 bellard
}
3336 8df1cd07 bellard
3337 84b7b8e7 bellard
/* warning: addr must be aligned */
3338 84b7b8e7 bellard
uint64_t ldq_phys(target_phys_addr_t addr)
3339 84b7b8e7 bellard
{
3340 84b7b8e7 bellard
    int io_index;
3341 84b7b8e7 bellard
    uint8_t *ptr;
3342 84b7b8e7 bellard
    uint64_t val;
3343 84b7b8e7 bellard
    unsigned long pd;
3344 84b7b8e7 bellard
    PhysPageDesc *p;
3345 84b7b8e7 bellard
3346 84b7b8e7 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3347 84b7b8e7 bellard
    if (!p) {
3348 84b7b8e7 bellard
        pd = IO_MEM_UNASSIGNED;
3349 84b7b8e7 bellard
    } else {
3350 84b7b8e7 bellard
        pd = p->phys_offset;
3351 84b7b8e7 bellard
    }
3352 3b46e624 ths
3353 2a4188a3 bellard
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3354 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3355 84b7b8e7 bellard
        /* I/O case */
3356 84b7b8e7 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3357 8da3ff18 pbrook
        if (p)
3358 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3359 84b7b8e7 bellard
#ifdef TARGET_WORDS_BIGENDIAN
3360 84b7b8e7 bellard
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3361 84b7b8e7 bellard
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3362 84b7b8e7 bellard
#else
3363 84b7b8e7 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3364 84b7b8e7 bellard
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3365 84b7b8e7 bellard
#endif
3366 84b7b8e7 bellard
    } else {
3367 84b7b8e7 bellard
        /* RAM case */
3368 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3369 84b7b8e7 bellard
            (addr & ~TARGET_PAGE_MASK);
3370 84b7b8e7 bellard
        val = ldq_p(ptr);
3371 84b7b8e7 bellard
    }
3372 84b7b8e7 bellard
    return val;
3373 84b7b8e7 bellard
}
3374 84b7b8e7 bellard
3375 aab33094 bellard
/* XXX: optimize */
3376 aab33094 bellard
uint32_t ldub_phys(target_phys_addr_t addr)
3377 aab33094 bellard
{
3378 aab33094 bellard
    uint8_t val;
3379 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3380 aab33094 bellard
    return val;
3381 aab33094 bellard
}
3382 aab33094 bellard
3383 aab33094 bellard
/* XXX: optimize */
3384 aab33094 bellard
uint32_t lduw_phys(target_phys_addr_t addr)
3385 aab33094 bellard
{
3386 aab33094 bellard
    uint16_t val;
3387 aab33094 bellard
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3388 aab33094 bellard
    return tswap16(val);
3389 aab33094 bellard
}
3390 aab33094 bellard
3391 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not marked as dirty
3392 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
3393 8df1cd07 bellard
   bits are used to track modified PTEs */
3394 8df1cd07 bellard
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3395 8df1cd07 bellard
{
3396 8df1cd07 bellard
    int io_index;
3397 8df1cd07 bellard
    uint8_t *ptr;
3398 8df1cd07 bellard
    unsigned long pd;
3399 8df1cd07 bellard
    PhysPageDesc *p;
3400 8df1cd07 bellard
3401 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3402 8df1cd07 bellard
    if (!p) {
3403 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3404 8df1cd07 bellard
    } else {
3405 8df1cd07 bellard
        pd = p->phys_offset;
3406 8df1cd07 bellard
    }
3407 3b46e624 ths
3408 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3409 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3410 8da3ff18 pbrook
        if (p)
3411 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3412 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3413 8df1cd07 bellard
    } else {
3414 74576198 aliguori
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3415 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3416 8df1cd07 bellard
        stl_p(ptr, val);
3417 74576198 aliguori
3418 74576198 aliguori
        if (unlikely(in_migration)) {
3419 74576198 aliguori
            if (!cpu_physical_memory_is_dirty(addr1)) {
3420 74576198 aliguori
                /* invalidate code */
3421 74576198 aliguori
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3422 74576198 aliguori
                /* set dirty bit */
3423 74576198 aliguori
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3424 74576198 aliguori
                    (0xff & ~CODE_DIRTY_FLAG);
3425 74576198 aliguori
            }
3426 74576198 aliguori
        }
3427 8df1cd07 bellard
    }
3428 8df1cd07 bellard
}
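/* Editor's note: an illustrative sketch, not part of the original file.
 * A typical caller of stl_phys_notdirty() is a target MMU helper that sets
 * accessed/dirty bits in a guest page-table entry during a software page
 * walk: the write must not be recorded in the physical dirty bitmap as a
 * guest modification of the page holding the PTE.  The helper name and the
 * 0x20 "accessed" mask are hypothetical.  */
static void example_pte_set_accessed(target_phys_addr_t pte_addr, uint32_t pte)
{
    /* Update the PTE without dirtying its page or invalidating TBs.  */
    stl_phys_notdirty(pte_addr, pte | 0x20);
}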
3429 8df1cd07 bellard
3430 bc98a7ef j_mayer
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3431 bc98a7ef j_mayer
{
3432 bc98a7ef j_mayer
    int io_index;
3433 bc98a7ef j_mayer
    uint8_t *ptr;
3434 bc98a7ef j_mayer
    unsigned long pd;
3435 bc98a7ef j_mayer
    PhysPageDesc *p;
3436 bc98a7ef j_mayer
3437 bc98a7ef j_mayer
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3438 bc98a7ef j_mayer
    if (!p) {
3439 bc98a7ef j_mayer
        pd = IO_MEM_UNASSIGNED;
3440 bc98a7ef j_mayer
    } else {
3441 bc98a7ef j_mayer
        pd = p->phys_offset;
3442 bc98a7ef j_mayer
    }
3443 3b46e624 ths
3444 bc98a7ef j_mayer
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3445 bc98a7ef j_mayer
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3446 8da3ff18 pbrook
        if (p)
3447 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3448 bc98a7ef j_mayer
#ifdef TARGET_WORDS_BIGENDIAN
3449 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3450 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3451 bc98a7ef j_mayer
#else
3452 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3453 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3454 bc98a7ef j_mayer
#endif
3455 bc98a7ef j_mayer
    } else {
3456 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3457 bc98a7ef j_mayer
            (addr & ~TARGET_PAGE_MASK);
3458 bc98a7ef j_mayer
        stq_p(ptr, val);
3459 bc98a7ef j_mayer
    }
3460 bc98a7ef j_mayer
}
3461 bc98a7ef j_mayer
3462 8df1cd07 bellard
/* warning: addr must be aligned */
3463 8df1cd07 bellard
void stl_phys(target_phys_addr_t addr, uint32_t val)
3464 8df1cd07 bellard
{
3465 8df1cd07 bellard
    int io_index;
3466 8df1cd07 bellard
    uint8_t *ptr;
3467 8df1cd07 bellard
    unsigned long pd;
3468 8df1cd07 bellard
    PhysPageDesc *p;
3469 8df1cd07 bellard
3470 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3471 8df1cd07 bellard
    if (!p) {
3472 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3473 8df1cd07 bellard
    } else {
3474 8df1cd07 bellard
        pd = p->phys_offset;
3475 8df1cd07 bellard
    }
3476 3b46e624 ths
3477 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3478 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3479 8da3ff18 pbrook
        if (p)
3480 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3481 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3482 8df1cd07 bellard
    } else {
3483 8df1cd07 bellard
        unsigned long addr1;
3484 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3485 8df1cd07 bellard
        /* RAM case */
3486 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3487 8df1cd07 bellard
        stl_p(ptr, val);
3488 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
3489 3a7d929e bellard
            /* invalidate code */
3490 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3491 3a7d929e bellard
            /* set dirty bit */
3492 f23db169 bellard
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3493 f23db169 bellard
                (0xff & ~CODE_DIRTY_FLAG);
3494 3a7d929e bellard
        }
3495 8df1cd07 bellard
    }
3496 8df1cd07 bellard
}
3497 8df1cd07 bellard
3498 aab33094 bellard
/* XXX: optimize */
3499 aab33094 bellard
void stb_phys(target_phys_addr_t addr, uint32_t val)
3500 aab33094 bellard
{
3501 aab33094 bellard
    uint8_t v = val;
3502 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3503 aab33094 bellard
}
3504 aab33094 bellard
3505 aab33094 bellard
/* XXX: optimize */
3506 aab33094 bellard
void stw_phys(target_phys_addr_t addr, uint32_t val)
3507 aab33094 bellard
{
3508 aab33094 bellard
    uint16_t v = tswap16(val);
3509 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3510 aab33094 bellard
}
3511 aab33094 bellard
3512 aab33094 bellard
/* XXX: optimize */
3513 aab33094 bellard
void stq_phys(target_phys_addr_t addr, uint64_t val)
3514 aab33094 bellard
{
3515 aab33094 bellard
    val = tswap64(val);
3516 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3517 aab33094 bellard
}
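/* Editor's note: an illustrative sketch, not part of the original file.
 * Device models typically use the ld*_phys/st*_phys accessors above to walk
 * control structures kept in guest RAM; values are accessed in the target's
 * endianness, and the "aligned" variants require naturally aligned
 * addresses.  The descriptor layout and names below are hypothetical.  */
static target_phys_addr_t example_complete_descriptor(target_phys_addr_t desc)
{
    uint32_t status = ldl_phys(desc);         /* word 0: status flags */
    uint64_t next = ldq_phys(desc + 8);       /* words 2-3: next descriptor */

    stl_phys(desc, status | 0x1);             /* mark the descriptor done */
    return (target_phys_addr_t)next;          /* follow the link */
}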
3518 aab33094 bellard
3519 13eb76e0 bellard
#endif
3520 13eb76e0 bellard
3521 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
3522 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3523 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
3524 13eb76e0 bellard
{
3525 13eb76e0 bellard
    int l;
3526 9b3c35e0 j_mayer
    target_phys_addr_t phys_addr;
3527 9b3c35e0 j_mayer
    target_ulong page;
3528 13eb76e0 bellard
3529 13eb76e0 bellard
    while (len > 0) {
3530 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3531 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
3532 13eb76e0 bellard
        /* if no physical page mapped, return an error */
3533 13eb76e0 bellard
        if (phys_addr == -1)
3534 13eb76e0 bellard
            return -1;
3535 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3536 13eb76e0 bellard
        if (l > len)
3537 13eb76e0 bellard
            l = len;
3538 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
3539 5e2972fd aliguori
#if !defined(CONFIG_USER_ONLY)
3540 5e2972fd aliguori
        if (is_write)
3541 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
3542 5e2972fd aliguori
        else
3543 5e2972fd aliguori
#endif
3544 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3545 13eb76e0 bellard
        len -= l;
3546 13eb76e0 bellard
        buf += l;
3547 13eb76e0 bellard
        addr += l;
3548 13eb76e0 bellard
    }
3549 13eb76e0 bellard
    return 0;
3550 13eb76e0 bellard
}
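/* Editor's note: an illustrative sketch, not part of the original file.
 * This is the kind of call a debugger stub or monitor command makes to peek
 * at guest virtual memory; cpu_memory_rw_debug() returns -1 if any page in
 * the range has no physical mapping.  The helper name is hypothetical.  */
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;
    }
    *value = ldl_p(buf);    /* interpret the bytes in target endianness */
    return 0;
}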
3551 13eb76e0 bellard
3552 2e70f6ef pbrook
/* in deterministic execution mode, instructions performing device I/O
3553 2e70f6ef pbrook
   must be at the end of the TB */
3554 2e70f6ef pbrook
void cpu_io_recompile(CPUState *env, void *retaddr)
3555 2e70f6ef pbrook
{
3556 2e70f6ef pbrook
    TranslationBlock *tb;
3557 2e70f6ef pbrook
    uint32_t n, cflags;
3558 2e70f6ef pbrook
    target_ulong pc, cs_base;
3559 2e70f6ef pbrook
    uint64_t flags;
3560 2e70f6ef pbrook
3561 2e70f6ef pbrook
    tb = tb_find_pc((unsigned long)retaddr);
3562 2e70f6ef pbrook
    if (!tb) {
3563 2e70f6ef pbrook
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
3564 2e70f6ef pbrook
                  retaddr);
3565 2e70f6ef pbrook
    }
3566 2e70f6ef pbrook
    n = env->icount_decr.u16.low + tb->icount;
3567 2e70f6ef pbrook
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3568 2e70f6ef pbrook
    /* Calculate how many instructions had been executed before the fault
3569 bf20dc07 ths
       occurred.  */
3570 2e70f6ef pbrook
    n = n - env->icount_decr.u16.low;
3571 2e70f6ef pbrook
    /* Generate a new TB ending on the I/O insn.  */
3572 2e70f6ef pbrook
    n++;
3573 2e70f6ef pbrook
    /* On MIPS and SH, delay slot instructions can only be restarted if
3574 2e70f6ef pbrook
       they were already the first instruction in the TB.  If this is not
3575 bf20dc07 ths
       the first instruction in a TB, then re-execute the preceding
3576 2e70f6ef pbrook
       branch.  */
3577 2e70f6ef pbrook
#if defined(TARGET_MIPS)
3578 2e70f6ef pbrook
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3579 2e70f6ef pbrook
        env->active_tc.PC -= 4;
3580 2e70f6ef pbrook
        env->icount_decr.u16.low++;
3581 2e70f6ef pbrook
        env->hflags &= ~MIPS_HFLAG_BMASK;
3582 2e70f6ef pbrook
    }
3583 2e70f6ef pbrook
#elif defined(TARGET_SH4)
3584 2e70f6ef pbrook
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3585 2e70f6ef pbrook
            && n > 1) {
3586 2e70f6ef pbrook
        env->pc -= 2;
3587 2e70f6ef pbrook
        env->icount_decr.u16.low++;
3588 2e70f6ef pbrook
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3589 2e70f6ef pbrook
    }
3590 2e70f6ef pbrook
#endif
3591 2e70f6ef pbrook
    /* This should never happen.  */
3592 2e70f6ef pbrook
    if (n > CF_COUNT_MASK)
3593 2e70f6ef pbrook
        cpu_abort(env, "TB too big during recompile");
3594 2e70f6ef pbrook
3595 2e70f6ef pbrook
    cflags = n | CF_LAST_IO;
3596 2e70f6ef pbrook
    pc = tb->pc;
3597 2e70f6ef pbrook
    cs_base = tb->cs_base;
3598 2e70f6ef pbrook
    flags = tb->flags;
3599 2e70f6ef pbrook
    tb_phys_invalidate(tb, -1);
3600 2e70f6ef pbrook
    /* FIXME: In theory this could raise an exception.  In practice
3601 2e70f6ef pbrook
       we have already translated the block once so it's probably ok.  */
3602 2e70f6ef pbrook
    tb_gen_code(env, pc, cs_base, flags, cflags);
3603 bf20dc07 ths
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3604 2e70f6ef pbrook
       the first in the TB) then we end up generating a whole new TB and
3605 2e70f6ef pbrook
       repeating the fault, which is horribly inefficient.
3606 2e70f6ef pbrook
       Better would be to execute just this insn uncached, or generate a
3607 2e70f6ef pbrook
       second new TB.  */
3608 2e70f6ef pbrook
    cpu_resume_from_signal(env, NULL);
3609 2e70f6ef pbrook
}
3610 2e70f6ef pbrook
3611 e3db7226 bellard
void dump_exec_info(FILE *f,
3612 e3db7226 bellard
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3613 e3db7226 bellard
{
3614 e3db7226 bellard
    int i, target_code_size, max_target_code_size;
3615 e3db7226 bellard
    int direct_jmp_count, direct_jmp2_count, cross_page;
3616 e3db7226 bellard
    TranslationBlock *tb;
3617 3b46e624 ths
3618 e3db7226 bellard
    target_code_size = 0;
3619 e3db7226 bellard
    max_target_code_size = 0;
3620 e3db7226 bellard
    cross_page = 0;
3621 e3db7226 bellard
    direct_jmp_count = 0;
3622 e3db7226 bellard
    direct_jmp2_count = 0;
3623 e3db7226 bellard
    for(i = 0; i < nb_tbs; i++) {
3624 e3db7226 bellard
        tb = &tbs[i];
3625 e3db7226 bellard
        target_code_size += tb->size;
3626 e3db7226 bellard
        if (tb->size > max_target_code_size)
3627 e3db7226 bellard
            max_target_code_size = tb->size;
3628 e3db7226 bellard
        if (tb->page_addr[1] != -1)
3629 e3db7226 bellard
            cross_page++;
3630 e3db7226 bellard
        if (tb->tb_next_offset[0] != 0xffff) {
3631 e3db7226 bellard
            direct_jmp_count++;
3632 e3db7226 bellard
            if (tb->tb_next_offset[1] != 0xffff) {
3633 e3db7226 bellard
                direct_jmp2_count++;
3634 e3db7226 bellard
            }
3635 e3db7226 bellard
        }
3636 e3db7226 bellard
    }
3637 e3db7226 bellard
    /* XXX: avoid using doubles? */
3638 57fec1fe bellard
    cpu_fprintf(f, "Translation buffer state:\n");
3639 26a5f13b bellard
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
3640 26a5f13b bellard
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3641 26a5f13b bellard
    cpu_fprintf(f, "TB count            %d/%d\n", 
3642 26a5f13b bellard
                nb_tbs, code_gen_max_blocks);
3643 5fafdf24 ths
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3644 e3db7226 bellard
                nb_tbs ? target_code_size / nb_tbs : 0,
3645 e3db7226 bellard
                max_target_code_size);
3646 5fafdf24 ths
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3647 e3db7226 bellard
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3648 e3db7226 bellard
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3649 5fafdf24 ths
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3650 5fafdf24 ths
            cross_page,
3651 e3db7226 bellard
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3652 e3db7226 bellard
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3653 5fafdf24 ths
                direct_jmp_count,
3654 e3db7226 bellard
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3655 e3db7226 bellard
                direct_jmp2_count,
3656 e3db7226 bellard
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3657 57fec1fe bellard
    cpu_fprintf(f, "\nStatistics:\n");
3658 e3db7226 bellard
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3659 e3db7226 bellard
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3660 e3db7226 bellard
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3661 b67d9a52 bellard
    tcg_dump_info(f, cpu_fprintf);
3662 e3db7226 bellard
}
3663 e3db7226 bellard
3664 5fafdf24 ths
#if !defined(CONFIG_USER_ONLY)
3665 61382a50 bellard
3666 61382a50 bellard
#define MMUSUFFIX _cmmu
3667 61382a50 bellard
#define GETPC() NULL
3668 61382a50 bellard
#define env cpu_single_env
3669 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
3670 61382a50 bellard
3671 61382a50 bellard
#define SHIFT 0
3672 61382a50 bellard
#include "softmmu_template.h"
3673 61382a50 bellard
3674 61382a50 bellard
#define SHIFT 1
3675 61382a50 bellard
#include "softmmu_template.h"
3676 61382a50 bellard
3677 61382a50 bellard
#define SHIFT 2
3678 61382a50 bellard
#include "softmmu_template.h"
3679 61382a50 bellard
3680 61382a50 bellard
#define SHIFT 3
3681 61382a50 bellard
#include "softmmu_template.h"
3682 61382a50 bellard
3683 61382a50 bellard
#undef env
3684 61382a50 bellard
3685 61382a50 bellard
#endif