/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

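/* The page maps are two-level tables indexed by the target page number
   (address >> TARGET_PAGE_BITS): the top L1_BITS select an entry in the
   L1 array and the low L2_BITS select the descriptor inside the L2 array
   it points to.  For example, assuming a 4 KB target page
   (TARGET_PAGE_BITS of 12) on a 32-bit target, L1_BITS is
   32 - 10 - 12 = 10, so both levels hold 1024 entries:

       PageDesc **lp = &l1_map[index >> L2_BITS];           // L1 lookup
       PageDesc  *p  = *lp ? *lp + (index & (L2_SIZE - 1))  // L2 lookup
                           : NULL;
*/
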
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
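
/* Callers index these helpers with a target page number, e.g. (sketch):

       PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);

   page_find() returns NULL when no L2 array has been allocated for that
   range yet, while page_find_alloc() allocates the missing L2 array on
   demand for indices inside the guest address space. */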

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
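
/* Typical use (sketch): look up the descriptor for a guest physical page
   and inspect its mapping:

       PhysPageDesc *pd = phys_page_find(paddr >> TARGET_PAGE_BITS);
       ram_addr_t phys_offset = pd ? pd->phys_offset : IO_MEM_UNASSIGNED;

   The page-aligned part of phys_offset is the offset into guest RAM (or an
   I/O region), and the low bits hold the io_index/flags, as noted in the
   PhysPageDesc definition above. */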

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
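
/* Sizing example (illustrative, figures depend on the build): in system
   emulation with 512 MB of guest RAM and no explicit tb_size, the buffer
   comes out as ram_size / 4 = 128 MB (subject to the per-host clamps
   above), and code_gen_max_blocks is that size divided by
   CODE_GEN_AVG_BLOCK_SIZE, which also fixes how many TranslationBlock
   descriptors are allocated in 'tbs'. */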

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
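
/* Typical startup order (sketch): the machine/linux-user init code calls
   cpu_exec_init_all(0) once, where 0 selects the default translation
   buffer size, and each target's CPU creation path then calls
   cpu_exec_init() for every CPUState it creates, appending it to the
   first_cpu list and registering its savevm handlers. */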

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
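
/* The lists walked above use tagged pointers: a TranslationBlock pointer
   is stored with a 2-bit tag in its low bits (TBs are sufficiently
   aligned, so those bits are free).  In page_next[] and jmp_next[] the tag
   is the slot number (0 or 1) inside the pointed-to TB, and the value
   (tb | 2) marks the head/end of the circular jump list, e.g. (sketch):

       n1  = (long)tb1 & 3;                           // extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);    // recover the pointer
*/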

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
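
/* set_bits() marks bits [start, start + len) in a byte-indexed bitmap.
   For example, set_bits(bitmap, 10, 4) sets bits 10..13, i.e. it ORs
   bitmap[1] with 0x3c; a span crossing byte boundaries fills the partial
   first and last bytes and writes 0xff to the whole bytes in between. */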

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
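
/* The low bits of 'cflags' (CF_COUNT_MASK) bound how many guest
   instructions the generated block may contain.  The self-modifying-code
   paths below call, for instance,

       tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);

   to produce a block holding just the single instruction that performed
   the store. */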

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
1098 fd6ce8f6 bellard
1099 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
1100 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1101 53a5960a pbrook
                                 unsigned int n, target_ulong page_addr)
1102 fd6ce8f6 bellard
{
1103 fd6ce8f6 bellard
    PageDesc *p;
1104 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1105 9fa3e853 bellard
1106 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1107 3a7d929e bellard
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1108 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1109 9fa3e853 bellard
    last_first_tb = p->first_tb;
1110 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1111 9fa3e853 bellard
    invalidate_page_bitmap(p);
1112 fd6ce8f6 bellard
1113 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1114 d720b93d bellard
1115 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1116 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1117 53a5960a pbrook
        target_ulong addr;
1118 53a5960a pbrook
        PageDesc *p2;
1119 9fa3e853 bellard
        int prot;
1120 9fa3e853 bellard
1121 fd6ce8f6 bellard
        /* force the host page to be non-writable (writes will have a
1122 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1123 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1124 fd6ce8f6 bellard
        prot = 0;
1125 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1126 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1127 53a5960a pbrook
1128 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1129 53a5960a pbrook
            if (!p2)
1130 53a5960a pbrook
                continue;
1131 53a5960a pbrook
            prot |= p2->flags;
1132 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1133 53a5960a pbrook
            page_get_flags(addr);
1134 53a5960a pbrook
          }
1135 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1136 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1137 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1138 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1139 53a5960a pbrook
               page_addr);
1140 fd6ce8f6 bellard
#endif
1141 fd6ce8f6 bellard
    }
1142 9fa3e853 bellard
#else
1143 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1144 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1145 9fa3e853 bellard
       allocated in a physical page */
1146 9fa3e853 bellard
    if (!last_first_tb) {
1147 6a00d601 bellard
        tlb_protect_code(page_addr);
1148 9fa3e853 bellard
    }
1149 9fa3e853 bellard
#endif
1150 d720b93d bellard
1151 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1152 fd6ce8f6 bellard
}
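/* A note on the tagging used just above: p->first_tb and the page_next[]
   links store the TranslationBlock pointer with the page index n (0 or 1)
   OR'ed into the two low bits, which are free for suitably aligned
   pointers.  Walkers recover both values the way the invalidation loop
   earlier in this file does:
       n  = (long)tb & 3;
       tb = (TranslationBlock *)((long)tb & ~3);
*/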
1153 fd6ce8f6 bellard
1154 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1155 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1156 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1157 fd6ce8f6 bellard
{
1158 fd6ce8f6 bellard
    TranslationBlock *tb;
1159 fd6ce8f6 bellard
1160 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1161 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1162 d4e8164f bellard
        return NULL;
1163 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1164 fd6ce8f6 bellard
    tb->pc = pc;
1165 b448f2f3 bellard
    tb->cflags = 0;
1166 d4e8164f bellard
    return tb;
1167 d4e8164f bellard
}
1168 d4e8164f bellard
1169 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
1170 2e70f6ef pbrook
{
1171 bf20dc07 ths
    /* In practice this is mostly used for single use temporary TBs.
1172 2e70f6ef pbrook
       Ignore the hard cases and just back up if this TB happens to
1173 2e70f6ef pbrook
       be the last one generated.  */
1174 2e70f6ef pbrook
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1175 2e70f6ef pbrook
        code_gen_ptr = tb->tc_ptr;
1176 2e70f6ef pbrook
        nb_tbs--;
1177 2e70f6ef pbrook
    }
1178 2e70f6ef pbrook
}
1179 2e70f6ef pbrook
1180 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1181 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1182 5fafdf24 ths
void tb_link_phys(TranslationBlock *tb,
1183 9fa3e853 bellard
                  target_ulong phys_pc, target_ulong phys_page2)
1184 d4e8164f bellard
{
1185 9fa3e853 bellard
    unsigned int h;
1186 9fa3e853 bellard
    TranslationBlock **ptb;
1187 9fa3e853 bellard
1188 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1189 c8a706fe pbrook
       before we are done.  */
1190 c8a706fe pbrook
    mmap_lock();
1191 9fa3e853 bellard
    /* add in the physical hash table */
1192 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1193 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1194 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1195 9fa3e853 bellard
    *ptb = tb;
1196 fd6ce8f6 bellard
1197 fd6ce8f6 bellard
    /* add in the page list */
1198 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1199 9fa3e853 bellard
    if (phys_page2 != -1)
1200 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1201 9fa3e853 bellard
    else
1202 9fa3e853 bellard
        tb->page_addr[1] = -1;
1203 9fa3e853 bellard
1204 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1205 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1206 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1207 d4e8164f bellard
1208 d4e8164f bellard
    /* init original jump addresses */
1209 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1210 d4e8164f bellard
        tb_reset_jump(tb, 0);
1211 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1212 d4e8164f bellard
        tb_reset_jump(tb, 1);
1213 8a40a180 bellard
1214 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1215 8a40a180 bellard
    tb_page_check();
1216 8a40a180 bellard
#endif
1217 c8a706fe pbrook
    mmap_unlock();
1218 fd6ce8f6 bellard
}
1219 fd6ce8f6 bellard
1220 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1221 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1222 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1223 fd6ce8f6 bellard
{
1224 9fa3e853 bellard
    int m_min, m_max, m;
1225 9fa3e853 bellard
    unsigned long v;
1226 9fa3e853 bellard
    TranslationBlock *tb;
1227 a513fe19 bellard
1228 a513fe19 bellard
    if (nb_tbs <= 0)
1229 a513fe19 bellard
        return NULL;
1230 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1231 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1232 a513fe19 bellard
        return NULL;
1233 a513fe19 bellard
    /* binary search (cf Knuth) */
1234 a513fe19 bellard
    m_min = 0;
1235 a513fe19 bellard
    m_max = nb_tbs - 1;
1236 a513fe19 bellard
    while (m_min <= m_max) {
1237 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1238 a513fe19 bellard
        tb = &tbs[m];
1239 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1240 a513fe19 bellard
        if (v == tc_ptr)
1241 a513fe19 bellard
            return tb;
1242 a513fe19 bellard
        else if (tc_ptr < v) {
1243 a513fe19 bellard
            m_max = m - 1;
1244 a513fe19 bellard
        } else {
1245 a513fe19 bellard
            m_min = m + 1;
1246 a513fe19 bellard
        }
1247 5fafdf24 ths
    }
1248 a513fe19 bellard
    return &tbs[m_max];
1249 a513fe19 bellard
}
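/* Illustrative note: the binary search above relies on tbs[] being ordered
   by tc_ptr, which holds because generated code is emitted sequentially
   into code_gen_buffer as TBs are allocated.  Since an exact match returns
   early, the loop exits with m_max indexing the last TB whose tc_ptr lies
   below the requested address, i.e. the block whose generated code
   contains tc_ptr. */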
1250 7501267e bellard
1251 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1252 ea041c0e bellard
1253 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1254 ea041c0e bellard
{
1255 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1256 ea041c0e bellard
    unsigned int n1;
1257 ea041c0e bellard
1258 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1259 ea041c0e bellard
    if (tb1 != NULL) {
1260 ea041c0e bellard
        /* find head of list */
1261 ea041c0e bellard
        for(;;) {
1262 ea041c0e bellard
            n1 = (long)tb1 & 3;
1263 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1264 ea041c0e bellard
            if (n1 == 2)
1265 ea041c0e bellard
                break;
1266 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1267 ea041c0e bellard
        }
1268 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1269 ea041c0e bellard
        tb_next = tb1;
1270 ea041c0e bellard
1271 ea041c0e bellard
        /* remove tb from the jmp_first list */
1272 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1273 ea041c0e bellard
        for(;;) {
1274 ea041c0e bellard
            tb1 = *ptb;
1275 ea041c0e bellard
            n1 = (long)tb1 & 3;
1276 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1277 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1278 ea041c0e bellard
                break;
1279 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1280 ea041c0e bellard
        }
1281 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1282 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1283 3b46e624 ths
1284 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1285 ea041c0e bellard
        tb_reset_jump(tb, n);
1286 ea041c0e bellard
1287 0124311e bellard
        /* suppress jumps in the tb on which we could have jumped */
1288 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1289 ea041c0e bellard
    }
1290 ea041c0e bellard
}
1291 ea041c0e bellard
1292 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1293 ea041c0e bellard
{
1294 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1295 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1296 ea041c0e bellard
}
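/* The jmp_first/jmp_next lists unlinked above use the same low-bit tagging
   as the page lists: each link carries in its two low bits the jump slot
   (0 or 1) it was reached through, and the value 2 marks the pointer kept
   in the owning TB's jmp_first field, which is why the head of the
   circular list is detected with the n1 == 2 test. */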
1297 ea041c0e bellard
1298 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1299 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1300 d720b93d bellard
{
1301 9b3c35e0 j_mayer
    target_phys_addr_t addr;
1302 9b3c35e0 j_mayer
    target_ulong pd;
1303 c2f07f81 pbrook
    ram_addr_t ram_addr;
1304 c2f07f81 pbrook
    PhysPageDesc *p;
1305 d720b93d bellard
1306 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1307 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1308 c2f07f81 pbrook
    if (!p) {
1309 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1310 c2f07f81 pbrook
    } else {
1311 c2f07f81 pbrook
        pd = p->phys_offset;
1312 c2f07f81 pbrook
    }
1313 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1314 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1315 d720b93d bellard
}
1316 c27004ec bellard
#endif
1317 d720b93d bellard
1318 6658ffb8 pbrook
/* Add a watchpoint.  */
1319 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1320 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1321 6658ffb8 pbrook
{
1322 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1323 c0ce998e aliguori
    CPUWatchpoint *wp;
1324 6658ffb8 pbrook
1325 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1326 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1327 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1328 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1329 b4051334 aliguori
        return -EINVAL;
1330 b4051334 aliguori
    }
1331 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1332 a1d1bb31 aliguori
1333 a1d1bb31 aliguori
    wp->vaddr = addr;
1334 b4051334 aliguori
    wp->len_mask = len_mask;
1335 a1d1bb31 aliguori
    wp->flags = flags;
1336 a1d1bb31 aliguori
1337 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1338 c0ce998e aliguori
    if (flags & BP_GDB)
1339 c0ce998e aliguori
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1340 c0ce998e aliguori
    else
1341 c0ce998e aliguori
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1342 6658ffb8 pbrook
1343 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1344 a1d1bb31 aliguori
1345 a1d1bb31 aliguori
    if (watchpoint)
1346 a1d1bb31 aliguori
        *watchpoint = wp;
1347 a1d1bb31 aliguori
    return 0;
1348 6658ffb8 pbrook
}
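/* Worked example for the check above: len = 4 gives len_mask = ~3, so a
   watchpoint at addr = 0x1000 is accepted while addr = 0x1002 fails the
   (addr & ~len_mask) test and returns -EINVAL, because watchpoints must
   be aligned to their own (power-of-2) length. */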
1349 6658ffb8 pbrook
1350 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1351 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1352 a1d1bb31 aliguori
                          int flags)
1353 6658ffb8 pbrook
{
1354 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1355 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1356 6658ffb8 pbrook
1357 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1358 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1359 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1360 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1361 6658ffb8 pbrook
            return 0;
1362 6658ffb8 pbrook
        }
1363 6658ffb8 pbrook
    }
1364 a1d1bb31 aliguori
    return -ENOENT;
1365 6658ffb8 pbrook
}
1366 6658ffb8 pbrook
1367 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1368 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1369 a1d1bb31 aliguori
{
1370 c0ce998e aliguori
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1371 7d03f82f edgar_igl
1372 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1373 a1d1bb31 aliguori
1374 a1d1bb31 aliguori
    qemu_free(watchpoint);
1375 a1d1bb31 aliguori
}
1376 a1d1bb31 aliguori
1377 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1378 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1379 a1d1bb31 aliguori
{
1380 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1381 a1d1bb31 aliguori
1382 c0ce998e aliguori
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1383 a1d1bb31 aliguori
        if (wp->flags & mask)
1384 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1385 c0ce998e aliguori
    }
1386 7d03f82f edgar_igl
}
1387 7d03f82f edgar_igl
1388 a1d1bb31 aliguori
/* Add a breakpoint.  */
1389 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1390 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1391 4c3a88a2 bellard
{
1392 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1393 c0ce998e aliguori
    CPUBreakpoint *bp;
1394 3b46e624 ths
1395 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1396 4c3a88a2 bellard
1397 a1d1bb31 aliguori
    bp->pc = pc;
1398 a1d1bb31 aliguori
    bp->flags = flags;
1399 a1d1bb31 aliguori
1400 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1401 c0ce998e aliguori
    if (flags & BP_GDB)
1402 c0ce998e aliguori
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1403 c0ce998e aliguori
    else
1404 c0ce998e aliguori
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1405 3b46e624 ths
1406 d720b93d bellard
    breakpoint_invalidate(env, pc);
1407 a1d1bb31 aliguori
1408 a1d1bb31 aliguori
    if (breakpoint)
1409 a1d1bb31 aliguori
        *breakpoint = bp;
1410 4c3a88a2 bellard
    return 0;
1411 4c3a88a2 bellard
#else
1412 a1d1bb31 aliguori
    return -ENOSYS;
1413 4c3a88a2 bellard
#endif
1414 4c3a88a2 bellard
}
1415 4c3a88a2 bellard
1416 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1417 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1418 a1d1bb31 aliguori
{
1419 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1420 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1421 a1d1bb31 aliguori
1422 c0ce998e aliguori
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1423 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1424 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1425 a1d1bb31 aliguori
            return 0;
1426 a1d1bb31 aliguori
        }
1427 7d03f82f edgar_igl
    }
1428 a1d1bb31 aliguori
    return -ENOENT;
1429 a1d1bb31 aliguori
#else
1430 a1d1bb31 aliguori
    return -ENOSYS;
1431 7d03f82f edgar_igl
#endif
1432 7d03f82f edgar_igl
}
1433 7d03f82f edgar_igl
1434 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1435 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1436 4c3a88a2 bellard
{
1437 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1438 c0ce998e aliguori
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1439 d720b93d bellard
1440 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1441 a1d1bb31 aliguori
1442 a1d1bb31 aliguori
    qemu_free(breakpoint);
1443 a1d1bb31 aliguori
#endif
1444 a1d1bb31 aliguori
}
1445 a1d1bb31 aliguori
1446 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1447 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1448 a1d1bb31 aliguori
{
1449 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1450 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1451 a1d1bb31 aliguori
1452 c0ce998e aliguori
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1453 a1d1bb31 aliguori
        if (bp->flags & mask)
1454 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1455 c0ce998e aliguori
    }
1456 4c3a88a2 bellard
#endif
1457 4c3a88a2 bellard
}
1458 4c3a88a2 bellard
1459 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1460 c33a346e bellard
   CPU loop after each instruction */
1461 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1462 c33a346e bellard
{
1463 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1464 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1465 c33a346e bellard
        env->singlestep_enabled = enabled;
1466 e22a25c9 aliguori
        if (kvm_enabled())
1467 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1468 e22a25c9 aliguori
        else {
1469 e22a25c9 aliguori
            /* must flush all the translated code to avoid inconsistencies */
1470 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1471 e22a25c9 aliguori
            tb_flush(env);
1472 e22a25c9 aliguori
        }
1473 c33a346e bellard
    }
1474 c33a346e bellard
#endif
1475 c33a346e bellard
}
1476 c33a346e bellard
1477 34865134 bellard
/* enable or disable low level logging */
1478 34865134 bellard
void cpu_set_log(int log_flags)
1479 34865134 bellard
{
1480 34865134 bellard
    loglevel = log_flags;
1481 34865134 bellard
    if (loglevel && !logfile) {
1482 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1483 34865134 bellard
        if (!logfile) {
1484 34865134 bellard
            perror(logfilename);
1485 34865134 bellard
            _exit(1);
1486 34865134 bellard
        }
1487 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1488 9fa3e853 bellard
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1489 9fa3e853 bellard
        {
1490 b55266b5 blueswir1
            static char logfile_buf[4096];
1491 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1492 9fa3e853 bellard
        }
1493 9fa3e853 bellard
#else
1494 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1495 9fa3e853 bellard
#endif
1496 e735b91c pbrook
        log_append = 1;
1497 e735b91c pbrook
    }
1498 e735b91c pbrook
    if (!loglevel && logfile) {
1499 e735b91c pbrook
        fclose(logfile);
1500 e735b91c pbrook
        logfile = NULL;
1501 34865134 bellard
    }
1502 34865134 bellard
}
1503 34865134 bellard
1504 34865134 bellard
void cpu_set_log_filename(const char *filename)
1505 34865134 bellard
{
1506 34865134 bellard
    logfilename = strdup(filename);
1507 e735b91c pbrook
    if (logfile) {
1508 e735b91c pbrook
        fclose(logfile);
1509 e735b91c pbrook
        logfile = NULL;
1510 e735b91c pbrook
    }
1511 e735b91c pbrook
    cpu_set_log(loglevel);
1512 34865134 bellard
}
1513 c33a346e bellard
1514 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1515 ea041c0e bellard
{
1516 3098dba0 aurel32
#if defined(USE_NPTL)
1517 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1518 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1519 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds.  Often
1520 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
1521 3098dba0 aurel32
#else
1522 ea041c0e bellard
    TranslationBlock *tb;
1523 15a51156 aurel32
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1524 59817ccb bellard
1525 3098dba0 aurel32
    tb = env->current_tb;
1526 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1527 3098dba0 aurel32
       all the potentially executing TB */
1528 3098dba0 aurel32
    if (tb && !testandset(&interrupt_lock)) {
1529 3098dba0 aurel32
        env->current_tb = NULL;
1530 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1531 3098dba0 aurel32
        resetlock(&interrupt_lock);
1532 be214e6c aurel32
    }
1533 3098dba0 aurel32
#endif
1534 3098dba0 aurel32
}
1535 3098dba0 aurel32
1536 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1537 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1538 3098dba0 aurel32
{
1539 3098dba0 aurel32
    int old_mask;
1540 be214e6c aurel32
1541 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1542 68a79315 bellard
    env->interrupt_request |= mask;
1543 3098dba0 aurel32
1544 2e70f6ef pbrook
    if (use_icount) {
1545 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1546 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1547 2e70f6ef pbrook
        if (!can_do_io(env)
1548 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1549 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1550 2e70f6ef pbrook
        }
1551 2e70f6ef pbrook
#endif
1552 2e70f6ef pbrook
    } else {
1553 3098dba0 aurel32
        cpu_unlink_tb(env);
1554 ea041c0e bellard
    }
1555 ea041c0e bellard
}
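/* In icount mode, setting icount_decr.u16.high to 0xffff makes the
   instruction counter appear negative at the next check in generated
   code, so the running TB drops back to the main loop without touching
   the TB links; otherwise cpu_unlink_tb() breaks the chaining so that
   execution returns to the main loop between blocks, where the pending
   interrupt_request can be serviced. */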
1556 ea041c0e bellard
1557 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1558 b54ad049 bellard
{
1559 b54ad049 bellard
    env->interrupt_request &= ~mask;
1560 b54ad049 bellard
}
1561 b54ad049 bellard
1562 3098dba0 aurel32
void cpu_exit(CPUState *env)
1563 3098dba0 aurel32
{
1564 3098dba0 aurel32
    env->exit_request = 1;
1565 3098dba0 aurel32
    cpu_unlink_tb(env);
1566 3098dba0 aurel32
}
1567 3098dba0 aurel32
1568 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1569 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1570 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1571 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1572 f193c797 bellard
      "show target assembly code for each compiled TB" },
1573 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1574 57fec1fe bellard
      "show micro ops for each compiled TB" },
1575 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1576 e01a1157 blueswir1
      "show micro ops "
1577 e01a1157 blueswir1
#ifdef TARGET_I386
1578 e01a1157 blueswir1
      "before eflags optimization and "
1579 f193c797 bellard
#endif
1580 e01a1157 blueswir1
      "after liveness analysis" },
1581 f193c797 bellard
    { CPU_LOG_INT, "int",
1582 f193c797 bellard
      "show interrupts/exceptions in short format" },
1583 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1584 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1585 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1586 e91c8a77 ths
      "show CPU state before block translation" },
1587 f193c797 bellard
#ifdef TARGET_I386
1588 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1589 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1590 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1591 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1592 f193c797 bellard
#endif
1593 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1594 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1595 fd872598 bellard
      "show all i/o ports accesses" },
1596 8e3a9fd2 bellard
#endif
1597 f193c797 bellard
    { 0, NULL, NULL },
1598 f193c797 bellard
};
1599 f193c797 bellard
1600 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1601 f193c797 bellard
{
1602 f193c797 bellard
    if (strlen(s2) != n)
1603 f193c797 bellard
        return 0;
1604 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1605 f193c797 bellard
}
1606 3b46e624 ths
1607 f193c797 bellard
/* takes a comma separated list of log masks. Returns 0 on error. */
1608 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1609 f193c797 bellard
{
1610 c7cd6a37 blueswir1
    const CPULogItem *item;
1611 f193c797 bellard
    int mask;
1612 f193c797 bellard
    const char *p, *p1;
1613 f193c797 bellard
1614 f193c797 bellard
    p = str;
1615 f193c797 bellard
    mask = 0;
1616 f193c797 bellard
    for(;;) {
1617 f193c797 bellard
        p1 = strchr(p, ',');
1618 f193c797 bellard
        if (!p1)
1619 f193c797 bellard
            p1 = p + strlen(p);
1620 8e3a9fd2 bellard
        if(cmp1(p,p1-p,"all")) {
1621 8e3a9fd2 bellard
                for(item = cpu_log_items; item->mask != 0; item++) {
1622 8e3a9fd2 bellard
                        mask |= item->mask;
1623 8e3a9fd2 bellard
                }
1624 8e3a9fd2 bellard
        } else {
1625 f193c797 bellard
        for(item = cpu_log_items; item->mask != 0; item++) {
1626 f193c797 bellard
            if (cmp1(p, p1 - p, item->name))
1627 f193c797 bellard
                goto found;
1628 f193c797 bellard
        }
1629 f193c797 bellard
        return 0;
1630 8e3a9fd2 bellard
        }
1631 f193c797 bellard
    found:
1632 f193c797 bellard
        mask |= item->mask;
1633 f193c797 bellard
        if (*p1 != ',')
1634 f193c797 bellard
            break;
1635 f193c797 bellard
        p = p1 + 1;
1636 f193c797 bellard
    }
1637 f193c797 bellard
    return mask;
1638 f193c797 bellard
}
1639 ea041c0e bellard
1640 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1641 7501267e bellard
{
1642 7501267e bellard
    va_list ap;
1643 493ae1f0 pbrook
    va_list ap2;
1644 7501267e bellard
1645 7501267e bellard
    va_start(ap, fmt);
1646 493ae1f0 pbrook
    va_copy(ap2, ap);
1647 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1648 7501267e bellard
    vfprintf(stderr, fmt, ap);
1649 7501267e bellard
    fprintf(stderr, "\n");
1650 7501267e bellard
#ifdef TARGET_I386
1651 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1652 7fe48483 bellard
#else
1653 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1654 7501267e bellard
#endif
1655 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1656 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1657 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1658 93fcfe39 aliguori
        qemu_log("\n");
1659 f9373291 j_mayer
#ifdef TARGET_I386
1660 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1661 f9373291 j_mayer
#else
1662 93fcfe39 aliguori
        log_cpu_state(env, 0);
1663 f9373291 j_mayer
#endif
1664 31b1a7b4 aliguori
        qemu_log_flush();
1665 93fcfe39 aliguori
        qemu_log_close();
1666 924edcae balrog
    }
1667 493ae1f0 pbrook
    va_end(ap2);
1668 f9373291 j_mayer
    va_end(ap);
1669 7501267e bellard
    abort();
1670 7501267e bellard
}
1671 7501267e bellard
1672 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1673 c5be9f08 ths
{
1674 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1675 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1676 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1677 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1678 5a38f081 aliguori
    CPUBreakpoint *bp;
1679 5a38f081 aliguori
    CPUWatchpoint *wp;
1680 5a38f081 aliguori
#endif
1681 5a38f081 aliguori
1682 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1683 5a38f081 aliguori
1684 5a38f081 aliguori
    /* Preserve chaining and index. */
1685 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1686 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1687 5a38f081 aliguori
1688 5a38f081 aliguori
    /* Clone all break/watchpoints.
1689 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1690 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1691 5a38f081 aliguori
    TAILQ_INIT(&new_env->breakpoints);
1692 5a38f081 aliguori
    TAILQ_INIT(&new_env->watchpoints);
1693 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1694 5a38f081 aliguori
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1695 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1696 5a38f081 aliguori
    }
1697 5a38f081 aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1698 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1699 5a38f081 aliguori
                              wp->flags, NULL);
1700 5a38f081 aliguori
    }
1701 5a38f081 aliguori
#endif
1702 5a38f081 aliguori
1703 c5be9f08 ths
    return new_env;
1704 c5be9f08 ths
}
1705 c5be9f08 ths
1706 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1707 0124311e bellard
1708 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1709 5c751e99 edgar_igl
{
1710 5c751e99 edgar_igl
    unsigned int i;
1711 5c751e99 edgar_igl
1712 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1713 5c751e99 edgar_igl
       overlap the flushed page.  */
1714 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1715 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1716 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1717 5c751e99 edgar_igl
1718 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1719 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1720 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1721 5c751e99 edgar_igl
}
1722 5c751e99 edgar_igl
1723 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1724 ee8b7021 bellard
   implemented yet) */
1725 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1726 33417e70 bellard
{
1727 33417e70 bellard
    int i;
1728 0124311e bellard
1729 9fa3e853 bellard
#if defined(DEBUG_TLB)
1730 9fa3e853 bellard
    printf("tlb_flush:\n");
1731 9fa3e853 bellard
#endif
1732 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1733 0124311e bellard
       links while we are modifying them */
1734 0124311e bellard
    env->current_tb = NULL;
1735 0124311e bellard
1736 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1737 84b7b8e7 bellard
        env->tlb_table[0][i].addr_read = -1;
1738 84b7b8e7 bellard
        env->tlb_table[0][i].addr_write = -1;
1739 84b7b8e7 bellard
        env->tlb_table[0][i].addr_code = -1;
1740 84b7b8e7 bellard
        env->tlb_table[1][i].addr_read = -1;
1741 84b7b8e7 bellard
        env->tlb_table[1][i].addr_write = -1;
1742 84b7b8e7 bellard
        env->tlb_table[1][i].addr_code = -1;
1743 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1744 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_read = -1;
1745 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_write = -1;
1746 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_code = -1;
1747 e37e6ee6 aurel32
#endif
1748 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1749 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_read = -1;
1750 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_write = -1;
1751 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_code = -1;
1752 6fa4cea9 j_mayer
#endif
1753 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1754 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_read = -1;
1755 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_write = -1;
1756 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_code = -1;
1757 6fa4cea9 j_mayer
#endif
1758 e37e6ee6 aurel32
1759 33417e70 bellard
    }
1760 9fa3e853 bellard
1761 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1762 9fa3e853 bellard
1763 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
1764 0a962c02 bellard
    if (env->kqemu_enabled) {
1765 0a962c02 bellard
        kqemu_flush(env, flush_global);
1766 0a962c02 bellard
    }
1767 0a962c02 bellard
#endif
1768 e3db7226 bellard
    tlb_flush_count++;
1769 33417e70 bellard
}
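/* Besides invalidating every TLB entry, a full flush also clears
   env->tb_jmp_cache: those cached TB lookups are keyed by guest virtual
   address and could otherwise keep pointing at translations whose
   virtual-to-physical mapping may have changed. */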
1770 33417e70 bellard
1771 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1772 61382a50 bellard
{
1773 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1774 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1775 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1776 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1777 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1778 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1779 84b7b8e7 bellard
        tlb_entry->addr_read = -1;
1780 84b7b8e7 bellard
        tlb_entry->addr_write = -1;
1781 84b7b8e7 bellard
        tlb_entry->addr_code = -1;
1782 84b7b8e7 bellard
    }
1783 61382a50 bellard
}
1784 61382a50 bellard
1785 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
1786 33417e70 bellard
{
1787 8a40a180 bellard
    int i;
1788 0124311e bellard
1789 9fa3e853 bellard
#if defined(DEBUG_TLB)
1790 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1791 9fa3e853 bellard
#endif
1792 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1793 0124311e bellard
       links while we are modifying them */
1794 0124311e bellard
    env->current_tb = NULL;
1795 61382a50 bellard
1796 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
1797 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1798 84b7b8e7 bellard
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1799 84b7b8e7 bellard
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1800 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1801 6fa4cea9 j_mayer
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1802 e37e6ee6 aurel32
#endif
1803 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1804 6fa4cea9 j_mayer
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1805 6fa4cea9 j_mayer
#endif
1806 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1807 e37e6ee6 aurel32
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1808 6fa4cea9 j_mayer
#endif
1809 0124311e bellard
1810 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
1811 9fa3e853 bellard
1812 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
1813 0a962c02 bellard
    if (env->kqemu_enabled) {
1814 0a962c02 bellard
        kqemu_flush_page(env, addr);
1815 0a962c02 bellard
    }
1816 0a962c02 bellard
#endif
1817 9fa3e853 bellard
}
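/* The software TLB is a direct-mapped table of CPU_TLB_SIZE entries per
   MMU mode, indexed by page number modulo the table size, which is why a
   single slot per mode, i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1),
   is enough to flush the page from every mode above. */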
1818 9fa3e853 bellard
1819 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1820 9fa3e853 bellard
   can be detected */
1821 6a00d601 bellard
static void tlb_protect_code(ram_addr_t ram_addr)
1822 9fa3e853 bellard
{
1823 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1824 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1825 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1826 9fa3e853 bellard
}
1827 9fa3e853 bellard
1828 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1829 3a7d929e bellard
   tested for self modifying code */
1830 5fafdf24 ths
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1831 3a7d929e bellard
                                    target_ulong vaddr)
1832 9fa3e853 bellard
{
1833 3a7d929e bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1834 1ccde1cb bellard
}
1835 1ccde1cb bellard
1836 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1837 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
1838 1ccde1cb bellard
{
1839 1ccde1cb bellard
    unsigned long addr;
1840 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1841 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1842 1ccde1cb bellard
        if ((addr - start) < length) {
1843 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1844 1ccde1cb bellard
        }
1845 1ccde1cb bellard
    }
1846 1ccde1cb bellard
}
1847 1ccde1cb bellard
1848 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
1849 3a7d929e bellard
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1850 0a962c02 bellard
                                     int dirty_flags)
1851 1ccde1cb bellard
{
1852 1ccde1cb bellard
    CPUState *env;
1853 4f2ac237 bellard
    unsigned long length, start1;
1854 0a962c02 bellard
    int i, mask, len;
1855 0a962c02 bellard
    uint8_t *p;
1856 1ccde1cb bellard
1857 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
1858 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
1859 1ccde1cb bellard
1860 1ccde1cb bellard
    length = end - start;
1861 1ccde1cb bellard
    if (length == 0)
1862 1ccde1cb bellard
        return;
1863 0a962c02 bellard
    len = length >> TARGET_PAGE_BITS;
1864 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
1865 6a00d601 bellard
    /* XXX: should not depend on cpu context */
1866 6a00d601 bellard
    env = first_cpu;
1867 3a7d929e bellard
    if (env->kqemu_enabled) {
1868 f23db169 bellard
        ram_addr_t addr;
1869 f23db169 bellard
        addr = start;
1870 f23db169 bellard
        for(i = 0; i < len; i++) {
1871 f23db169 bellard
            kqemu_set_notdirty(env, addr);
1872 f23db169 bellard
            addr += TARGET_PAGE_SIZE;
1873 f23db169 bellard
        }
1874 3a7d929e bellard
    }
1875 3a7d929e bellard
#endif
1876 f23db169 bellard
    mask = ~dirty_flags;
1877 f23db169 bellard
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1878 f23db169 bellard
    for(i = 0; i < len; i++)
1879 f23db169 bellard
        p[i] &= mask;
1880 f23db169 bellard
1881 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
1882 1ccde1cb bellard
       when accessing the range */
1883 5579c7f3 pbrook
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1884 5579c7f3 pbrook
    /* Check that we don't span multiple blocks - this breaks the
1885 5579c7f3 pbrook
       address comparisons below.  */
1886 5579c7f3 pbrook
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1887 5579c7f3 pbrook
            != (end - 1) - start) {
1888 5579c7f3 pbrook
        abort();
1889 5579c7f3 pbrook
    }
1890 5579c7f3 pbrook
1891 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1892 6a00d601 bellard
        for(i = 0; i < CPU_TLB_SIZE; i++)
1893 84b7b8e7 bellard
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1894 6a00d601 bellard
        for(i = 0; i < CPU_TLB_SIZE; i++)
1895 84b7b8e7 bellard
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1896 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1897 6fa4cea9 j_mayer
        for(i = 0; i < CPU_TLB_SIZE; i++)
1898 6fa4cea9 j_mayer
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1899 e37e6ee6 aurel32
#endif
1900 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1901 6fa4cea9 j_mayer
        for(i = 0; i < CPU_TLB_SIZE; i++)
1902 6fa4cea9 j_mayer
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1903 6fa4cea9 j_mayer
#endif
1904 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1905 e37e6ee6 aurel32
        for(i = 0; i < CPU_TLB_SIZE; i++)
1906 e37e6ee6 aurel32
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1907 6fa4cea9 j_mayer
#endif
1908 6a00d601 bellard
    }
1909 1ccde1cb bellard
}
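/* phys_ram_dirty holds one byte of dirty flags per target page, so the
   loop above clears the requested dirty_flags bits for every page in
   [start, end).  Re-arming the corresponding TLB write entries with
   TLB_NOTDIRTY then forces the next guest write to go through the slow
   path, which marks the page dirty again. */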
1910 1ccde1cb bellard
1911 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
1912 74576198 aliguori
{
1913 74576198 aliguori
    in_migration = enable;
1914 74576198 aliguori
    return 0;
1915 74576198 aliguori
}
1916 74576198 aliguori
1917 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
1918 74576198 aliguori
{
1919 74576198 aliguori
    return in_migration;
1920 74576198 aliguori
}
1921 74576198 aliguori
1922 2bec46dc aliguori
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1923 2bec46dc aliguori
{
1924 2bec46dc aliguori
    if (kvm_enabled())
1925 2bec46dc aliguori
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1926 2bec46dc aliguori
}
1927 2bec46dc aliguori
1928 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1929 3a7d929e bellard
{
1930 3a7d929e bellard
    ram_addr_t ram_addr;
1931 5579c7f3 pbrook
    void *p;
1932 3a7d929e bellard
1933 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1934 5579c7f3 pbrook
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1935 5579c7f3 pbrook
            + tlb_entry->addend);
1936 5579c7f3 pbrook
        ram_addr = qemu_ram_addr_from_host(p);
1937 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1938 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1939 3a7d929e bellard
        }
1940 3a7d929e bellard
    }
1941 3a7d929e bellard
}
1942 3a7d929e bellard
1943 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
1944 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
1945 3a7d929e bellard
{
1946 3a7d929e bellard
    int i;
1947 3a7d929e bellard
    for(i = 0; i < CPU_TLB_SIZE; i++)
1948 84b7b8e7 bellard
        tlb_update_dirty(&env->tlb_table[0][i]);
1949 3a7d929e bellard
    for(i = 0; i < CPU_TLB_SIZE; i++)
1950 84b7b8e7 bellard
        tlb_update_dirty(&env->tlb_table[1][i]);
1951 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1952 6fa4cea9 j_mayer
    for(i = 0; i < CPU_TLB_SIZE; i++)
1953 6fa4cea9 j_mayer
        tlb_update_dirty(&env->tlb_table[2][i]);
1954 e37e6ee6 aurel32
#endif
1955 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1956 6fa4cea9 j_mayer
    for(i = 0; i < CPU_TLB_SIZE; i++)
1957 6fa4cea9 j_mayer
        tlb_update_dirty(&env->tlb_table[3][i]);
1958 6fa4cea9 j_mayer
#endif
1959 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1960 e37e6ee6 aurel32
    for(i = 0; i < CPU_TLB_SIZE; i++)
1961 e37e6ee6 aurel32
        tlb_update_dirty(&env->tlb_table[4][i]);
1962 6fa4cea9 j_mayer
#endif
1963 3a7d929e bellard
}
1964 3a7d929e bellard
1965 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1966 1ccde1cb bellard
{
1967 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1968 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
1969 1ccde1cb bellard
}
1970 1ccde1cb bellard
1971 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
1972 0f459d16 pbrook
   so that it is no longer dirty */
1973 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1974 1ccde1cb bellard
{
1975 1ccde1cb bellard
    int i;
1976 1ccde1cb bellard
1977 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
1978 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1979 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1980 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1981 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1982 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1983 e37e6ee6 aurel32
#endif
1984 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1985 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1986 6fa4cea9 j_mayer
#endif
1987 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1988 e37e6ee6 aurel32
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
1989 6fa4cea9 j_mayer
#endif
1990 9fa3e853 bellard
}
1991 9fa3e853 bellard
1992 59817ccb bellard
/* add a new TLB entry. At most one entry for a given virtual address
1993 59817ccb bellard
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1994 59817ccb bellard
   (can only happen in non SOFTMMU mode for I/O pages or pages
1995 59817ccb bellard
   conflicting with the host address space). */
1996 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1997 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
1998 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
1999 9fa3e853 bellard
{
2000 92e873b9 bellard
    PhysPageDesc *p;
2001 4f2ac237 bellard
    unsigned long pd;
2002 9fa3e853 bellard
    unsigned int index;
2003 4f2ac237 bellard
    target_ulong address;
2004 0f459d16 pbrook
    target_ulong code_address;
2005 108c49b8 bellard
    target_phys_addr_t addend;
2006 9fa3e853 bellard
    int ret;
2007 84b7b8e7 bellard
    CPUTLBEntry *te;
2008 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2009 0f459d16 pbrook
    target_phys_addr_t iotlb;
2010 9fa3e853 bellard
2011 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2012 9fa3e853 bellard
    if (!p) {
2013 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
2014 9fa3e853 bellard
    } else {
2015 9fa3e853 bellard
        pd = p->phys_offset;
2016 9fa3e853 bellard
    }
2017 9fa3e853 bellard
#if defined(DEBUG_TLB)
2018 6ebbf390 j_mayer
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2019 6ebbf390 j_mayer
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2020 9fa3e853 bellard
#endif
2021 9fa3e853 bellard
2022 9fa3e853 bellard
    ret = 0;
2023 0f459d16 pbrook
    address = vaddr;
2024 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2025 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2026 0f459d16 pbrook
        address |= TLB_MMIO;
2027 0f459d16 pbrook
    }
2028 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2029 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2030 0f459d16 pbrook
        /* Normal RAM.  */
2031 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2032 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2033 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
2034 0f459d16 pbrook
        else
2035 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
2036 0f459d16 pbrook
    } else {
2037 0f459d16 pbrook
        /* IO handlers are currently passed a physical address.
2038 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2039 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2040 0f459d16 pbrook
           and avoid full address decoding in every device.
2041 0f459d16 pbrook
           We can't use the high bits of pd for this because
2042 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2043 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2044 8da3ff18 pbrook
        if (p) {
2045 8da3ff18 pbrook
            iotlb += p->region_offset;
2046 8da3ff18 pbrook
        } else {
2047 8da3ff18 pbrook
            iotlb += paddr;
2048 8da3ff18 pbrook
        }
2049 0f459d16 pbrook
    }
2050 0f459d16 pbrook
2051 0f459d16 pbrook
    code_address = address;
2052 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2053 0f459d16 pbrook
       watchpoint trap routines.  */
2054 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2055 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2056 0f459d16 pbrook
            iotlb = io_mem_watch + paddr;
2057 0f459d16 pbrook
            /* TODO: The memory case can be optimized by not trapping
2058 0f459d16 pbrook
               reads of pages with a write breakpoint.  */
2059 0f459d16 pbrook
            address |= TLB_MMIO;
2060 6658ffb8 pbrook
        }
2061 0f459d16 pbrook
    }
2062 d79acba4 balrog
2063 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2064 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2065 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2066 0f459d16 pbrook
    te->addend = addend - vaddr;
2067 0f459d16 pbrook
    if (prot & PAGE_READ) {
2068 0f459d16 pbrook
        te->addr_read = address;
2069 0f459d16 pbrook
    } else {
2070 0f459d16 pbrook
        te->addr_read = -1;
2071 0f459d16 pbrook
    }
2072 5c751e99 edgar_igl
2073 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2074 0f459d16 pbrook
        te->addr_code = code_address;
2075 0f459d16 pbrook
    } else {
2076 0f459d16 pbrook
        te->addr_code = -1;
2077 0f459d16 pbrook
    }
2078 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2079 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2080 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2081 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2082 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2083 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2084 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2085 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2086 9fa3e853 bellard
        } else {
2087 0f459d16 pbrook
            te->addr_write = address;
2088 9fa3e853 bellard
        }
2089 0f459d16 pbrook
    } else {
2090 0f459d16 pbrook
        te->addr_write = -1;
2091 9fa3e853 bellard
    }
2092 9fa3e853 bellard
    return ret;
2093 9fa3e853 bellard
}
2094 9fa3e853 bellard
2095 0124311e bellard
#else
2096 0124311e bellard
2097 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2098 0124311e bellard
{
2099 0124311e bellard
}
2100 0124311e bellard
2101 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2102 0124311e bellard
{
2103 0124311e bellard
}
2104 0124311e bellard
2105 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2106 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
2107 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2108 9fa3e853 bellard
{
2109 9fa3e853 bellard
    return 0;
2110 9fa3e853 bellard
}
2111 0124311e bellard
2112 9fa3e853 bellard
/* dump memory mappings */
2113 9fa3e853 bellard
void page_dump(FILE *f)
2114 33417e70 bellard
{
2115 9fa3e853 bellard
    unsigned long start, end;
2116 9fa3e853 bellard
    int i, j, prot, prot1;
2117 9fa3e853 bellard
    PageDesc *p;
2118 33417e70 bellard
2119 9fa3e853 bellard
    fprintf(f, "%-8s %-8s %-8s %s\n",
2120 9fa3e853 bellard
            "start", "end", "size", "prot");
2121 9fa3e853 bellard
    start = -1;
2122 9fa3e853 bellard
    end = -1;
2123 9fa3e853 bellard
    prot = 0;
2124 9fa3e853 bellard
    for(i = 0; i <= L1_SIZE; i++) {
2125 9fa3e853 bellard
        if (i < L1_SIZE)
2126 9fa3e853 bellard
            p = l1_map[i];
2127 9fa3e853 bellard
        else
2128 9fa3e853 bellard
            p = NULL;
2129 9fa3e853 bellard
        for(j = 0;j < L2_SIZE; j++) {
2130 9fa3e853 bellard
            if (!p)
2131 9fa3e853 bellard
                prot1 = 0;
2132 9fa3e853 bellard
            else
2133 9fa3e853 bellard
                prot1 = p[j].flags;
2134 9fa3e853 bellard
            if (prot1 != prot) {
2135 9fa3e853 bellard
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2136 9fa3e853 bellard
                if (start != -1) {
2137 9fa3e853 bellard
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2138 5fafdf24 ths
                            start, end, end - start,
2139 9fa3e853 bellard
                            prot & PAGE_READ ? 'r' : '-',
2140 9fa3e853 bellard
                            prot & PAGE_WRITE ? 'w' : '-',
2141 9fa3e853 bellard
                            prot & PAGE_EXEC ? 'x' : '-');
2142 9fa3e853 bellard
                }
2143 9fa3e853 bellard
                if (prot1 != 0)
2144 9fa3e853 bellard
                    start = end;
2145 9fa3e853 bellard
                else
2146 9fa3e853 bellard
                    start = -1;
2147 9fa3e853 bellard
                prot = prot1;
2148 9fa3e853 bellard
            }
2149 9fa3e853 bellard
            if (!p)
2150 9fa3e853 bellard
                break;
2151 9fa3e853 bellard
        }
2152 33417e70 bellard
    }
2153 33417e70 bellard
}
2154 33417e70 bellard
2155 53a5960a pbrook
int page_get_flags(target_ulong address)
2156 33417e70 bellard
{
2157 9fa3e853 bellard
    PageDesc *p;
2158 9fa3e853 bellard
2159 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2160 33417e70 bellard
    if (!p)
2161 9fa3e853 bellard
        return 0;
2162 9fa3e853 bellard
    return p->flags;
2163 9fa3e853 bellard
}
2164 9fa3e853 bellard
2165 9fa3e853 bellard
/* modify the flags of a page and invalidate the code if
2166 9fa3e853 bellard
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2167 9fa3e853 bellard
   depending on PAGE_WRITE */
2168 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2169 9fa3e853 bellard
{
2170 9fa3e853 bellard
    PageDesc *p;
2171 53a5960a pbrook
    target_ulong addr;
2172 9fa3e853 bellard
2173 c8a706fe pbrook
    /* mmap_lock should already be held.  */
2174 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2175 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2176 9fa3e853 bellard
    if (flags & PAGE_WRITE)
2177 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2178 9fa3e853 bellard
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2179 9fa3e853 bellard
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2180 17e2377a pbrook
        /* We may be called for host regions that are outside guest
2181 17e2377a pbrook
           address space.  */
2182 17e2377a pbrook
        if (!p)
2183 17e2377a pbrook
            return;
2184 9fa3e853 bellard
        /* if the write protection is set, then we invalidate the code
2185 9fa3e853 bellard
           inside */
2186 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2187 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2188 9fa3e853 bellard
            p->first_tb) {
2189 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2190 9fa3e853 bellard
        }
2191 9fa3e853 bellard
        p->flags = flags;
2192 9fa3e853 bellard
    }
2193 33417e70 bellard
}
2194 33417e70 bellard
2195 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2196 3d97b40b ths
{
2197 3d97b40b ths
    PageDesc *p;
2198 3d97b40b ths
    target_ulong end;
2199 3d97b40b ths
    target_ulong addr;
2200 3d97b40b ths
2201 55f280c9 balrog
    if (start + len < start)
2202 55f280c9 balrog
        /* we've wrapped around */
2203 55f280c9 balrog
        return -1;
2204 55f280c9 balrog
2205 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2206 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2207 3d97b40b ths
2208 3d97b40b ths
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2209 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2210 3d97b40b ths
        if( !p )
2211 3d97b40b ths
            return -1;
2212 3d97b40b ths
        if( !(p->flags & PAGE_VALID) )
2213 3d97b40b ths
            return -1;
2214 3d97b40b ths
2215 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2216 3d97b40b ths
            return -1;
2217 dae3270c bellard
        if (flags & PAGE_WRITE) {
2218 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2219 dae3270c bellard
                return -1;
2220 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2221 dae3270c bellard
               contains translated code */
2222 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2223 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2224 dae3270c bellard
                    return -1;
2225 dae3270c bellard
            }
2226 dae3270c bellard
            return 0;
2227 dae3270c bellard
        }
2228 3d97b40b ths
    }
2229 3d97b40b ths
    return 0;
2230 3d97b40b ths
}
2231 3d97b40b ths
2232 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2233 9fa3e853 bellard
   page. Return TRUE if the fault was successfully handled. */
2234 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2235 9fa3e853 bellard
{
2236 9fa3e853 bellard
    unsigned int page_index, prot, pindex;
2237 9fa3e853 bellard
    PageDesc *p, *p1;
2238 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2239 9fa3e853 bellard
2240 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2241 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2242 c8a706fe pbrook
       practice it seems to be ok.  */
2243 c8a706fe pbrook
    mmap_lock();
2244 c8a706fe pbrook
2245 83fb7adf bellard
    host_start = address & qemu_host_page_mask;
2246 9fa3e853 bellard
    page_index = host_start >> TARGET_PAGE_BITS;
2247 9fa3e853 bellard
    p1 = page_find(page_index);
2248 c8a706fe pbrook
    if (!p1) {
2249 c8a706fe pbrook
        mmap_unlock();
2250 9fa3e853 bellard
        return 0;
2251 c8a706fe pbrook
    }
2252 83fb7adf bellard
    host_end = host_start + qemu_host_page_size;
2253 9fa3e853 bellard
    p = p1;
2254 9fa3e853 bellard
    prot = 0;
2255 9fa3e853 bellard
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2256 9fa3e853 bellard
        prot |= p->flags;
2257 9fa3e853 bellard
        p++;
2258 9fa3e853 bellard
    }
2259 9fa3e853 bellard
    /* if the page was really writable, then we change its
2260 9fa3e853 bellard
       protection back to writable */
2261 9fa3e853 bellard
    if (prot & PAGE_WRITE_ORG) {
2262 9fa3e853 bellard
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2263 9fa3e853 bellard
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2264 5fafdf24 ths
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2265 9fa3e853 bellard
                     (prot & PAGE_BITS) | PAGE_WRITE);
2266 9fa3e853 bellard
            p1[pindex].flags |= PAGE_WRITE;
2267 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2268 9fa3e853 bellard
               the corresponding translated code. */
2269 d720b93d bellard
            tb_invalidate_phys_page(address, pc, puc);
2270 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2271 9fa3e853 bellard
            tb_invalidate_check(address);
2272 9fa3e853 bellard
#endif
2273 c8a706fe pbrook
            mmap_unlock();
2274 9fa3e853 bellard
            return 1;
2275 9fa3e853 bellard
        }
2276 9fa3e853 bellard
    }
2277 c8a706fe pbrook
    mmap_unlock();
2278 9fa3e853 bellard
    return 0;
2279 9fa3e853 bellard
}
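/* Illustrative sketch (editor addition): the typical caller of
   page_unprotect() is the host SEGV handler of the user-mode emulator.
   example_handle_write_fault() is a hypothetical name; the real caller
   lives in the per-host signal handling code. */
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long pc, void *puc)
{
    /* h2g() converts a host address back to a guest virtual address. */
    target_ulong guest_addr = h2g(host_addr);

    /* If the fault came from our own write protection of a page holding
       translated code, page_unprotect() invalidates that code, restores
       PAGE_WRITE and returns non-zero to report the fault as handled. */
    return page_unprotect(guest_addr, pc, puc);
}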
2280 9fa3e853 bellard
2281 6a00d601 bellard
static inline void tlb_set_dirty(CPUState *env,
2282 6a00d601 bellard
                                 unsigned long addr, target_ulong vaddr)
2283 1ccde1cb bellard
{
2284 1ccde1cb bellard
}
2285 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2286 9fa3e853 bellard
2287 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2288 8da3ff18 pbrook
2289 db7b5426 blueswir1
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2290 8da3ff18 pbrook
                             ram_addr_t memory, ram_addr_t region_offset);
2291 00f82b8a aurel32
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2292 8da3ff18 pbrook
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2293 db7b5426 blueswir1
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2294 db7b5426 blueswir1
                      need_subpage)                                     \
2295 db7b5426 blueswir1
    do {                                                                \
2296 db7b5426 blueswir1
        if (addr > start_addr)                                          \
2297 db7b5426 blueswir1
            start_addr2 = 0;                                            \
2298 db7b5426 blueswir1
        else {                                                          \
2299 db7b5426 blueswir1
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2300 db7b5426 blueswir1
            if (start_addr2 > 0)                                        \
2301 db7b5426 blueswir1
                need_subpage = 1;                                       \
2302 db7b5426 blueswir1
        }                                                               \
2303 db7b5426 blueswir1
                                                                        \
2304 49e9fba2 blueswir1
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2305 db7b5426 blueswir1
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2306 db7b5426 blueswir1
        else {                                                          \
2307 db7b5426 blueswir1
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2308 db7b5426 blueswir1
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2309 db7b5426 blueswir1
                need_subpage = 1;                                       \
2310 db7b5426 blueswir1
        }                                                               \
2311 db7b5426 blueswir1
    } while (0)
2312 db7b5426 blueswir1
2313 33417e70 bellard
/* register physical memory. 'size' must be a multiple of the target
2314 33417e70 bellard
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2315 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2316 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2317 8da3ff18 pbrook
   start_addr and region_offset are rounded down to a page boundary
2318 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2319 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
2320 8da3ff18 pbrook
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2321 8da3ff18 pbrook
                                         ram_addr_t size,
2322 8da3ff18 pbrook
                                         ram_addr_t phys_offset,
2323 8da3ff18 pbrook
                                         ram_addr_t region_offset)
2324 33417e70 bellard
{
2325 108c49b8 bellard
    target_phys_addr_t addr, end_addr;
2326 92e873b9 bellard
    PhysPageDesc *p;
2327 9d42037b bellard
    CPUState *env;
2328 00f82b8a aurel32
    ram_addr_t orig_size = size;
2329 db7b5426 blueswir1
    void *subpage;
2330 33417e70 bellard
2331 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2332 da260249 bellard
    /* XXX: should not depend on cpu context */
2333 da260249 bellard
    env = first_cpu;
2334 da260249 bellard
    if (env->kqemu_enabled) {
2335 da260249 bellard
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2336 da260249 bellard
    }
2337 da260249 bellard
#endif
2338 7ba1e619 aliguori
    if (kvm_enabled())
2339 7ba1e619 aliguori
        kvm_set_phys_mem(start_addr, size, phys_offset);
2340 7ba1e619 aliguori
2341 67c4d23c pbrook
    if (phys_offset == IO_MEM_UNASSIGNED) {
2342 67c4d23c pbrook
        region_offset = start_addr;
2343 67c4d23c pbrook
    }
2344 8da3ff18 pbrook
    region_offset &= TARGET_PAGE_MASK;
2345 5fd386f6 bellard
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2346 49e9fba2 blueswir1
    end_addr = start_addr + (target_phys_addr_t)size;
2347 49e9fba2 blueswir1
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2348 db7b5426 blueswir1
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2349 db7b5426 blueswir1
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2350 00f82b8a aurel32
            ram_addr_t orig_memory = p->phys_offset;
2351 db7b5426 blueswir1
            target_phys_addr_t start_addr2, end_addr2;
2352 db7b5426 blueswir1
            int need_subpage = 0;
2353 db7b5426 blueswir1
2354 db7b5426 blueswir1
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2355 db7b5426 blueswir1
                          need_subpage);
2356 4254fab8 blueswir1
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2357 db7b5426 blueswir1
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2358 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2359 8da3ff18 pbrook
                                           &p->phys_offset, orig_memory,
2360 8da3ff18 pbrook
                                           p->region_offset);
2361 db7b5426 blueswir1
                } else {
2362 db7b5426 blueswir1
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2363 db7b5426 blueswir1
                                            >> IO_MEM_SHIFT];
2364 db7b5426 blueswir1
                }
2365 8da3ff18 pbrook
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2366 8da3ff18 pbrook
                                 region_offset);
2367 8da3ff18 pbrook
                p->region_offset = 0;
2368 db7b5426 blueswir1
            } else {
2369 db7b5426 blueswir1
                p->phys_offset = phys_offset;
2370 db7b5426 blueswir1
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2371 db7b5426 blueswir1
                    (phys_offset & IO_MEM_ROMD))
2372 db7b5426 blueswir1
                    phys_offset += TARGET_PAGE_SIZE;
2373 db7b5426 blueswir1
            }
2374 db7b5426 blueswir1
        } else {
2375 db7b5426 blueswir1
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2376 db7b5426 blueswir1
            p->phys_offset = phys_offset;
2377 8da3ff18 pbrook
            p->region_offset = region_offset;
2378 db7b5426 blueswir1
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2379 8da3ff18 pbrook
                (phys_offset & IO_MEM_ROMD)) {
2380 db7b5426 blueswir1
                phys_offset += TARGET_PAGE_SIZE;
2381 0e8f0967 pbrook
            } else {
2382 db7b5426 blueswir1
                target_phys_addr_t start_addr2, end_addr2;
2383 db7b5426 blueswir1
                int need_subpage = 0;
2384 db7b5426 blueswir1
2385 db7b5426 blueswir1
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2386 db7b5426 blueswir1
                              end_addr2, need_subpage);
2387 db7b5426 blueswir1
2388 4254fab8 blueswir1
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2389 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2390 8da3ff18 pbrook
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2391 67c4d23c pbrook
                                           addr & TARGET_PAGE_MASK);
2392 db7b5426 blueswir1
                    subpage_register(subpage, start_addr2, end_addr2,
2393 8da3ff18 pbrook
                                     phys_offset, region_offset);
2394 8da3ff18 pbrook
                    p->region_offset = 0;
2395 db7b5426 blueswir1
                }
2396 db7b5426 blueswir1
            }
2397 db7b5426 blueswir1
        }
2398 8da3ff18 pbrook
        region_offset += TARGET_PAGE_SIZE;
2399 33417e70 bellard
    }
2400 3b46e624 ths
2401 9d42037b bellard
    /* since each CPU stores ram addresses in its TLB cache, we must
2402 9d42037b bellard
       reset the modified entries */
2403 9d42037b bellard
    /* XXX: slow ! */
2404 9d42037b bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2405 9d42037b bellard
        tlb_flush(env, 1);
2406 9d42037b bellard
    }
2407 33417e70 bellard
}
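/* Illustrative sketch (editor addition): how a board model typically uses
   the function above to register guest RAM and a ROM.  The sizes and
   addresses are made up; most callers go through the thin
   cpu_register_physical_memory() wrapper, which passes region_offset = 0. */
static void example_board_memory_init(void)
{
    ram_addr_t ram_offset, rom_offset;

    /* 64 MB of plain RAM: the low bits of phys_offset are IO_MEM_RAM (0) */
    ram_offset = qemu_ram_alloc(64 * 1024 * 1024);
    cpu_register_physical_memory_offset(0x00000000, 64 * 1024 * 1024,
                                        ram_offset | IO_MEM_RAM, 0);

    /* 64 KB of ROM near the top of the address space: IO_MEM_ROM makes the
       region read-only on the softmmu fast path */
    rom_offset = qemu_ram_alloc(64 * 1024);
    cpu_register_physical_memory_offset(0xfff00000, 64 * 1024,
                                        rom_offset | IO_MEM_ROM, 0);
}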
2408 33417e70 bellard
2409 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2410 00f82b8a aurel32
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2411 ba863458 bellard
{
2412 ba863458 bellard
    PhysPageDesc *p;
2413 ba863458 bellard
2414 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2415 ba863458 bellard
    if (!p)
2416 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2417 ba863458 bellard
    return p->phys_offset;
2418 ba863458 bellard
}
2419 ba863458 bellard
2420 f65ed4c1 aliguori
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2421 f65ed4c1 aliguori
{
2422 f65ed4c1 aliguori
    if (kvm_enabled())
2423 f65ed4c1 aliguori
        kvm_coalesce_mmio_region(addr, size);
2424 f65ed4c1 aliguori
}
2425 f65ed4c1 aliguori
2426 f65ed4c1 aliguori
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2427 f65ed4c1 aliguori
{
2428 f65ed4c1 aliguori
    if (kvm_enabled())
2429 f65ed4c1 aliguori
        kvm_uncoalesce_mmio_region(addr, size);
2430 f65ed4c1 aliguori
}
2431 f65ed4c1 aliguori
2432 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2433 e9a1ab19 bellard
/* XXX: better than nothing */
2434 94a6b54f pbrook
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2435 e9a1ab19 bellard
{
2436 e9a1ab19 bellard
    ram_addr_t addr;
2437 94a6b54f pbrook
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2438 012a7045 ths
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2439 94a6b54f pbrook
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2440 e9a1ab19 bellard
        abort();
2441 e9a1ab19 bellard
    }
2442 94a6b54f pbrook
    addr = last_ram_offset;
2443 94a6b54f pbrook
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2444 e9a1ab19 bellard
    return addr;
2445 e9a1ab19 bellard
}
2446 94a6b54f pbrook
#endif
2447 94a6b54f pbrook
2448 94a6b54f pbrook
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2449 94a6b54f pbrook
{
2450 94a6b54f pbrook
    RAMBlock *new_block;
2451 94a6b54f pbrook
2452 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2453 94a6b54f pbrook
    if (kqemu_phys_ram_base) {
2454 94a6b54f pbrook
        return kqemu_ram_alloc(size);
2455 94a6b54f pbrook
    }
2456 94a6b54f pbrook
#endif
2457 94a6b54f pbrook
2458 94a6b54f pbrook
    size = TARGET_PAGE_ALIGN(size);
2459 94a6b54f pbrook
    new_block = qemu_malloc(sizeof(*new_block));
2460 94a6b54f pbrook
2461 94a6b54f pbrook
    new_block->host = qemu_vmalloc(size);
2462 94a6b54f pbrook
    new_block->offset = last_ram_offset;
2463 94a6b54f pbrook
    new_block->length = size;
2464 94a6b54f pbrook
2465 94a6b54f pbrook
    new_block->next = ram_blocks;
2466 94a6b54f pbrook
    ram_blocks = new_block;
2467 94a6b54f pbrook
2468 94a6b54f pbrook
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2469 94a6b54f pbrook
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2470 94a6b54f pbrook
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2471 94a6b54f pbrook
           0xff, size >> TARGET_PAGE_BITS);
2472 94a6b54f pbrook
2473 94a6b54f pbrook
    last_ram_offset += size;
2474 94a6b54f pbrook
2475 94a6b54f pbrook
    return new_block->offset;
2476 94a6b54f pbrook
}
2477 e9a1ab19 bellard
2478 e9a1ab19 bellard
void qemu_ram_free(ram_addr_t addr)
2479 e9a1ab19 bellard
{
2480 94a6b54f pbrook
    /* TODO: implement this.  */
2481 e9a1ab19 bellard
}
2482 e9a1ab19 bellard
2483 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2484 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2485 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
2486 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
2487 5579c7f3 pbrook

2488 5579c7f3 pbrook
   It should not be used for general purpose DMA.
2489 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2490 5579c7f3 pbrook
 */
2491 dc828ca1 pbrook
void *qemu_get_ram_ptr(ram_addr_t addr)
2492 dc828ca1 pbrook
{
2493 94a6b54f pbrook
    RAMBlock *prev;
2494 94a6b54f pbrook
    RAMBlock **prevp;
2495 94a6b54f pbrook
    RAMBlock *block;
2496 94a6b54f pbrook
2497 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2498 94a6b54f pbrook
    if (kqemu_phys_ram_base) {
2499 94a6b54f pbrook
        return kqemu_phys_ram_base + addr;
2500 94a6b54f pbrook
    }
2501 94a6b54f pbrook
#endif
2502 94a6b54f pbrook
2503 94a6b54f pbrook
    prev = NULL;
2504 94a6b54f pbrook
    prevp = &ram_blocks;
2505 94a6b54f pbrook
    block = ram_blocks;
2506 94a6b54f pbrook
    while (block && (block->offset > addr
2507 94a6b54f pbrook
                     || block->offset + block->length <= addr)) {
2508 94a6b54f pbrook
        if (prev)
2509 94a6b54f pbrook
          prevp = &prev->next;
2510 94a6b54f pbrook
        prev = block;
2511 94a6b54f pbrook
        block = block->next;
2512 94a6b54f pbrook
    }
2513 94a6b54f pbrook
    if (!block) {
2514 94a6b54f pbrook
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2515 94a6b54f pbrook
        abort();
2516 94a6b54f pbrook
    }
2517 94a6b54f pbrook
    /* Move this entry to the start of the list.  */
2518 94a6b54f pbrook
    if (prev) {
2519 94a6b54f pbrook
        prev->next = block->next;
2520 94a6b54f pbrook
        block->next = *prevp;
2521 94a6b54f pbrook
        *prevp = block;
2522 94a6b54f pbrook
    }
2523 94a6b54f pbrook
    return block->host + (addr - block->offset);
2524 dc828ca1 pbrook
}
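/* Illustrative sketch (editor addition): the intended use of
   qemu_get_ram_ptr() as described in the comment above - a device that owns
   a RAM block (e.g. video memory) keeps a host pointer to it for its own
   accesses.  ExampleDisplayState and its fields are hypothetical. */
typedef struct ExampleDisplayState {
    ram_addr_t vram_offset;     /* value returned by qemu_ram_alloc() */
    uint8_t *vram_ptr;          /* host view of the same block */
} ExampleDisplayState;

static void example_display_init_vram(ExampleDisplayState *s, ram_addr_t size)
{
    s->vram_offset = qemu_ram_alloc(size);
    s->vram_ptr = qemu_get_ram_ptr(s->vram_offset);
    /* For guest-visible DMA the device must use cpu_physical_memory_rw()
       or cpu_physical_memory_map() instead, as noted above. */
}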
2525 dc828ca1 pbrook
2526 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
2527 5579c7f3 pbrook
   (typically a TLB entry) back to a ram offset.  */
2528 5579c7f3 pbrook
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2529 5579c7f3 pbrook
{
2530 94a6b54f pbrook
    RAMBlock *prev;
2531 94a6b54f pbrook
    RAMBlock **prevp;
2532 94a6b54f pbrook
    RAMBlock *block;
2533 94a6b54f pbrook
    uint8_t *host = ptr;
2534 94a6b54f pbrook
2535 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2536 94a6b54f pbrook
    if (kqemu_phys_ram_base) {
2537 94a6b54f pbrook
        return host - kqemu_phys_ram_base;
2538 94a6b54f pbrook
    }
2539 94a6b54f pbrook
#endif
2540 94a6b54f pbrook
2541 94a6b54f pbrook
    prev = NULL;
2542 94a6b54f pbrook
    prevp = &ram_blocks;
2543 94a6b54f pbrook
    block = ram_blocks;
2544 94a6b54f pbrook
    while (block && (block->host > host
2545 94a6b54f pbrook
                     || block->host + block->length <= host)) {
2546 94a6b54f pbrook
        if (prev)
2547 94a6b54f pbrook
          prevp = &prev->next;
2548 94a6b54f pbrook
        prev = block;
2549 94a6b54f pbrook
        block = block->next;
2550 94a6b54f pbrook
    }
2551 94a6b54f pbrook
    if (!block) {
2552 94a6b54f pbrook
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2553 94a6b54f pbrook
        abort();
2554 94a6b54f pbrook
    }
2555 94a6b54f pbrook
    return block->offset + (host - block->host);
2556 5579c7f3 pbrook
}
2557 5579c7f3 pbrook
2558 a4193c8a bellard
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2559 33417e70 bellard
{
2560 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2561 ab3d1727 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2562 67d3b957 pbrook
#endif
2563 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2564 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 1);
2565 e18231a3 blueswir1
#endif
2566 e18231a3 blueswir1
    return 0;
2567 e18231a3 blueswir1
}
2568 e18231a3 blueswir1
2569 e18231a3 blueswir1
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2570 e18231a3 blueswir1
{
2571 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2572 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2573 e18231a3 blueswir1
#endif
2574 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2575 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 2);
2576 e18231a3 blueswir1
#endif
2577 e18231a3 blueswir1
    return 0;
2578 e18231a3 blueswir1
}
2579 e18231a3 blueswir1
2580 e18231a3 blueswir1
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2581 e18231a3 blueswir1
{
2582 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2583 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2584 e18231a3 blueswir1
#endif
2585 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2586 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 4);
2587 b4f0a316 blueswir1
#endif
2588 33417e70 bellard
    return 0;
2589 33417e70 bellard
}
2590 33417e70 bellard
2591 a4193c8a bellard
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2592 33417e70 bellard
{
2593 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2594 ab3d1727 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2595 67d3b957 pbrook
#endif
2596 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2597 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 1);
2598 e18231a3 blueswir1
#endif
2599 e18231a3 blueswir1
}
2600 e18231a3 blueswir1
2601 e18231a3 blueswir1
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2602 e18231a3 blueswir1
{
2603 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2604 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2605 e18231a3 blueswir1
#endif
2606 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2607 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 2);
2608 e18231a3 blueswir1
#endif
2609 e18231a3 blueswir1
}
2610 e18231a3 blueswir1
2611 e18231a3 blueswir1
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2612 e18231a3 blueswir1
{
2613 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2614 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2615 e18231a3 blueswir1
#endif
2616 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2617 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 4);
2618 b4f0a316 blueswir1
#endif
2619 33417e70 bellard
}
2620 33417e70 bellard
2621 33417e70 bellard
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2622 33417e70 bellard
    unassigned_mem_readb,
2623 e18231a3 blueswir1
    unassigned_mem_readw,
2624 e18231a3 blueswir1
    unassigned_mem_readl,
2625 33417e70 bellard
};
2626 33417e70 bellard
2627 33417e70 bellard
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2628 33417e70 bellard
    unassigned_mem_writeb,
2629 e18231a3 blueswir1
    unassigned_mem_writew,
2630 e18231a3 blueswir1
    unassigned_mem_writel,
2631 33417e70 bellard
};
2632 33417e70 bellard
2633 0f459d16 pbrook
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2634 0f459d16 pbrook
                                uint32_t val)
2635 9fa3e853 bellard
{
2636 3a7d929e bellard
    int dirty_flags;
2637 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2638 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2639 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2640 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 1);
2641 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2642 9fa3e853 bellard
#endif
2643 3a7d929e bellard
    }
2644 5579c7f3 pbrook
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2645 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2646 f32fc648 bellard
    if (cpu_single_env->kqemu_enabled &&
2647 f32fc648 bellard
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2648 f32fc648 bellard
        kqemu_modify_page(cpu_single_env, ram_addr);
2649 f32fc648 bellard
#endif
2650 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2651 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2652 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2653 f23db169 bellard
       flushed */
2654 f23db169 bellard
    if (dirty_flags == 0xff)
2655 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2656 9fa3e853 bellard
}
2657 9fa3e853 bellard
2658 0f459d16 pbrook
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2659 0f459d16 pbrook
                                uint32_t val)
2660 9fa3e853 bellard
{
2661 3a7d929e bellard
    int dirty_flags;
2662 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2663 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2664 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2665 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 2);
2666 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2667 9fa3e853 bellard
#endif
2668 3a7d929e bellard
    }
2669 5579c7f3 pbrook
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2670 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2671 f32fc648 bellard
    if (cpu_single_env->kqemu_enabled &&
2672 f32fc648 bellard
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2673 f32fc648 bellard
        kqemu_modify_page(cpu_single_env, ram_addr);
2674 f32fc648 bellard
#endif
2675 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2676 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2677 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2678 f23db169 bellard
       flushed */
2679 f23db169 bellard
    if (dirty_flags == 0xff)
2680 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2681 9fa3e853 bellard
}
2682 9fa3e853 bellard
2683 0f459d16 pbrook
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2684 0f459d16 pbrook
                                uint32_t val)
2685 9fa3e853 bellard
{
2686 3a7d929e bellard
    int dirty_flags;
2687 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2688 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2689 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2690 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
2691 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2692 9fa3e853 bellard
#endif
2693 3a7d929e bellard
    }
2694 5579c7f3 pbrook
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2695 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2696 f32fc648 bellard
    if (cpu_single_env->kqemu_enabled &&
2697 f32fc648 bellard
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2698 f32fc648 bellard
        kqemu_modify_page(cpu_single_env, ram_addr);
2699 f32fc648 bellard
#endif
2700 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2701 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2702 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2703 f23db169 bellard
       flushed */
2704 f23db169 bellard
    if (dirty_flags == 0xff)
2705 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2706 9fa3e853 bellard
}
2707 9fa3e853 bellard
2708 3a7d929e bellard
static CPUReadMemoryFunc *error_mem_read[3] = {
2709 9fa3e853 bellard
    NULL, /* never used */
2710 9fa3e853 bellard
    NULL, /* never used */
2711 9fa3e853 bellard
    NULL, /* never used */
2712 9fa3e853 bellard
};
2713 9fa3e853 bellard
2714 1ccde1cb bellard
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2715 1ccde1cb bellard
    notdirty_mem_writeb,
2716 1ccde1cb bellard
    notdirty_mem_writew,
2717 1ccde1cb bellard
    notdirty_mem_writel,
2718 1ccde1cb bellard
};
2719 1ccde1cb bellard
2720 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
2721 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
2722 0f459d16 pbrook
{
2723 0f459d16 pbrook
    CPUState *env = cpu_single_env;
2724 06d55cc1 aliguori
    target_ulong pc, cs_base;
2725 06d55cc1 aliguori
    TranslationBlock *tb;
2726 0f459d16 pbrook
    target_ulong vaddr;
2727 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2728 06d55cc1 aliguori
    int cpu_flags;
2729 0f459d16 pbrook
2730 06d55cc1 aliguori
    if (env->watchpoint_hit) {
2731 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
2732 06d55cc1 aliguori
         * the debug interrupt so that it will trigger after the
2733 06d55cc1 aliguori
         * current instruction. */
2734 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2735 06d55cc1 aliguori
        return;
2736 06d55cc1 aliguori
    }
2737 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2738 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2739 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
2740 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2741 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
2742 6e140f28 aliguori
            if (!env->watchpoint_hit) {
2743 6e140f28 aliguori
                env->watchpoint_hit = wp;
2744 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
2745 6e140f28 aliguori
                if (!tb) {
2746 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2747 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
2748 6e140f28 aliguori
                }
2749 6e140f28 aliguori
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2750 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
2751 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2752 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
2753 6e140f28 aliguori
                } else {
2754 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2755 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2756 6e140f28 aliguori
                }
2757 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
2758 06d55cc1 aliguori
            }
2759 6e140f28 aliguori
        } else {
2760 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
2761 0f459d16 pbrook
        }
2762 0f459d16 pbrook
    }
2763 0f459d16 pbrook
}
2764 0f459d16 pbrook
2765 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2766 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
2767 6658ffb8 pbrook
   phys routines.  */
2768 6658ffb8 pbrook
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2769 6658ffb8 pbrook
{
2770 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2771 6658ffb8 pbrook
    return ldub_phys(addr);
2772 6658ffb8 pbrook
}
2773 6658ffb8 pbrook
2774 6658ffb8 pbrook
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2775 6658ffb8 pbrook
{
2776 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2777 6658ffb8 pbrook
    return lduw_phys(addr);
2778 6658ffb8 pbrook
}
2779 6658ffb8 pbrook
2780 6658ffb8 pbrook
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2781 6658ffb8 pbrook
{
2782 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2783 6658ffb8 pbrook
    return ldl_phys(addr);
2784 6658ffb8 pbrook
}
2785 6658ffb8 pbrook
2786 6658ffb8 pbrook
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2787 6658ffb8 pbrook
                             uint32_t val)
2788 6658ffb8 pbrook
{
2789 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2790 6658ffb8 pbrook
    stb_phys(addr, val);
2791 6658ffb8 pbrook
}
2792 6658ffb8 pbrook
2793 6658ffb8 pbrook
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2794 6658ffb8 pbrook
                             uint32_t val)
2795 6658ffb8 pbrook
{
2796 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2797 6658ffb8 pbrook
    stw_phys(addr, val);
2798 6658ffb8 pbrook
}
2799 6658ffb8 pbrook
2800 6658ffb8 pbrook
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2801 6658ffb8 pbrook
                             uint32_t val)
2802 6658ffb8 pbrook
{
2803 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2804 6658ffb8 pbrook
    stl_phys(addr, val);
2805 6658ffb8 pbrook
}
2806 6658ffb8 pbrook
2807 6658ffb8 pbrook
static CPUReadMemoryFunc *watch_mem_read[3] = {
2808 6658ffb8 pbrook
    watch_mem_readb,
2809 6658ffb8 pbrook
    watch_mem_readw,
2810 6658ffb8 pbrook
    watch_mem_readl,
2811 6658ffb8 pbrook
};
2812 6658ffb8 pbrook
2813 6658ffb8 pbrook
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2814 6658ffb8 pbrook
    watch_mem_writeb,
2815 6658ffb8 pbrook
    watch_mem_writew,
2816 6658ffb8 pbrook
    watch_mem_writel,
2817 6658ffb8 pbrook
};
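/* Illustrative sketch (editor addition): how the watch_mem_* routines above
   are reached.  A debugger front end inserts a watchpoint; the TLB then
   routes accesses to that page through io_mem_watch, and check_watchpoint()
   re-checks the exact address before forwarding to the normal phys
   routines.  The cpu_watchpoint_insert() signature is assumed to match this
   tree's break/watchpoint API. */
static void example_insert_write_watchpoint(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    /* watch 4 bytes at 'addr' for guest writes; a 32-bit store at that
       address then reaches watch_mem_writel() with len_mask ~0x3 */
    cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp);
}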
2818 6658ffb8 pbrook
2819 db7b5426 blueswir1
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2820 db7b5426 blueswir1
                                 unsigned int len)
2821 db7b5426 blueswir1
{
2822 db7b5426 blueswir1
    uint32_t ret;
2823 db7b5426 blueswir1
    unsigned int idx;
2824 db7b5426 blueswir1
2825 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2826 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2827 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2828 db7b5426 blueswir1
           mmio, len, addr, idx);
2829 db7b5426 blueswir1
#endif
2830 8da3ff18 pbrook
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2831 8da3ff18 pbrook
                                       addr + mmio->region_offset[idx][0][len]);
2832 db7b5426 blueswir1
2833 db7b5426 blueswir1
    return ret;
2834 db7b5426 blueswir1
}
2835 db7b5426 blueswir1
2836 db7b5426 blueswir1
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2837 db7b5426 blueswir1
                              uint32_t value, unsigned int len)
2838 db7b5426 blueswir1
{
2839 db7b5426 blueswir1
    unsigned int idx;
2840 db7b5426 blueswir1
2841 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2842 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2843 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2844 db7b5426 blueswir1
           mmio, len, addr, idx, value);
2845 db7b5426 blueswir1
#endif
2846 8da3ff18 pbrook
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2847 8da3ff18 pbrook
                                  addr + mmio->region_offset[idx][1][len],
2848 8da3ff18 pbrook
                                  value);
2849 db7b5426 blueswir1
}
2850 db7b5426 blueswir1
2851 db7b5426 blueswir1
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2852 db7b5426 blueswir1
{
2853 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2854 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2855 db7b5426 blueswir1
#endif
2856 db7b5426 blueswir1
2857 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 0);
2858 db7b5426 blueswir1
}
2859 db7b5426 blueswir1
2860 db7b5426 blueswir1
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2861 db7b5426 blueswir1
                            uint32_t value)
2862 db7b5426 blueswir1
{
2863 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2864 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2865 db7b5426 blueswir1
#endif
2866 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
2867 db7b5426 blueswir1
}
2868 db7b5426 blueswir1
2869 db7b5426 blueswir1
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2870 db7b5426 blueswir1
{
2871 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2872 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2873 db7b5426 blueswir1
#endif
2874 db7b5426 blueswir1
2875 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 1);
2876 db7b5426 blueswir1
}
2877 db7b5426 blueswir1
2878 db7b5426 blueswir1
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2879 db7b5426 blueswir1
                            uint32_t value)
2880 db7b5426 blueswir1
{
2881 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2882 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2883 db7b5426 blueswir1
#endif
2884 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
2885 db7b5426 blueswir1
}
2886 db7b5426 blueswir1
2887 db7b5426 blueswir1
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2888 db7b5426 blueswir1
{
2889 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2890 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2891 db7b5426 blueswir1
#endif
2892 db7b5426 blueswir1
2893 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 2);
2894 db7b5426 blueswir1
}
2895 db7b5426 blueswir1
2896 db7b5426 blueswir1
static void subpage_writel (void *opaque,
2897 db7b5426 blueswir1
                         target_phys_addr_t addr, uint32_t value)
2898 db7b5426 blueswir1
{
2899 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2900 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2901 db7b5426 blueswir1
#endif
2902 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
2903 db7b5426 blueswir1
}
2904 db7b5426 blueswir1
2905 db7b5426 blueswir1
static CPUReadMemoryFunc *subpage_read[] = {
2906 db7b5426 blueswir1
    &subpage_readb,
2907 db7b5426 blueswir1
    &subpage_readw,
2908 db7b5426 blueswir1
    &subpage_readl,
2909 db7b5426 blueswir1
};
2910 db7b5426 blueswir1
2911 db7b5426 blueswir1
static CPUWriteMemoryFunc *subpage_write[] = {
2912 db7b5426 blueswir1
    &subpage_writeb,
2913 db7b5426 blueswir1
    &subpage_writew,
2914 db7b5426 blueswir1
    &subpage_writel,
2915 db7b5426 blueswir1
};
2916 db7b5426 blueswir1
2917 db7b5426 blueswir1
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2918 8da3ff18 pbrook
                             ram_addr_t memory, ram_addr_t region_offset)
2919 db7b5426 blueswir1
{
2920 db7b5426 blueswir1
    int idx, eidx;
2921 4254fab8 blueswir1
    unsigned int i;
2922 db7b5426 blueswir1
2923 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2924 db7b5426 blueswir1
        return -1;
2925 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
2926 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
2927 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2928 db7b5426 blueswir1
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2929 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
2930 db7b5426 blueswir1
#endif
2931 db7b5426 blueswir1
    memory >>= IO_MEM_SHIFT;
2932 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
2933 4254fab8 blueswir1
        for (i = 0; i < 4; i++) {
2934 3ee89922 blueswir1
            if (io_mem_read[memory][i]) {
2935 3ee89922 blueswir1
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2936 3ee89922 blueswir1
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2937 8da3ff18 pbrook
                mmio->region_offset[idx][0][i] = region_offset;
2938 3ee89922 blueswir1
            }
2939 3ee89922 blueswir1
            if (io_mem_write[memory][i]) {
2940 3ee89922 blueswir1
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2941 3ee89922 blueswir1
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2942 8da3ff18 pbrook
                mmio->region_offset[idx][1][i] = region_offset;
2943 3ee89922 blueswir1
            }
2944 4254fab8 blueswir1
        }
2945 db7b5426 blueswir1
    }
2946 db7b5426 blueswir1
2947 db7b5426 blueswir1
    return 0;
2948 db7b5426 blueswir1
}
2949 db7b5426 blueswir1
2950 00f82b8a aurel32
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2951 8da3ff18 pbrook
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2952 db7b5426 blueswir1
{
2953 db7b5426 blueswir1
    subpage_t *mmio;
2954 db7b5426 blueswir1
    int subpage_memory;
2955 db7b5426 blueswir1
2956 db7b5426 blueswir1
    mmio = qemu_mallocz(sizeof(subpage_t));
2957 1eec614b aliguori
2958 1eec614b aliguori
    mmio->base = base;
2959 1eec614b aliguori
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2960 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2961 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2962 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2963 db7b5426 blueswir1
#endif
2964 1eec614b aliguori
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2965 1eec614b aliguori
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2966 8da3ff18 pbrook
                         region_offset);
2967 db7b5426 blueswir1
2968 db7b5426 blueswir1
    return mmio;
2969 db7b5426 blueswir1
}
2970 db7b5426 blueswir1
2971 88715657 aliguori
static int get_free_io_mem_idx(void)
2972 88715657 aliguori
{
2973 88715657 aliguori
    int i;
2974 88715657 aliguori
2975 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2976 88715657 aliguori
        if (!io_mem_used[i]) {
2977 88715657 aliguori
            io_mem_used[i] = 1;
2978 88715657 aliguori
            return i;
2979 88715657 aliguori
        }
2980 88715657 aliguori
2981 88715657 aliguori
    return -1;
2982 88715657 aliguori
}
2983 88715657 aliguori
2984 33417e70 bellard
static void io_mem_init(void)
2985 33417e70 bellard
{
2986 88715657 aliguori
    int i;
2987 88715657 aliguori
2988 3a7d929e bellard
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2989 a4193c8a bellard
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2990 3a7d929e bellard
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2991 88715657 aliguori
    for (i=0; i<5; i++)
2992 88715657 aliguori
        io_mem_used[i] = 1;
2993 1ccde1cb bellard
2994 0f459d16 pbrook
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2995 6658ffb8 pbrook
                                          watch_mem_write, NULL);
2996 640f42e4 blueswir1
#ifdef CONFIG_KQEMU
2997 94a6b54f pbrook
    if (kqemu_phys_ram_base) {
2998 94a6b54f pbrook
        /* alloc dirty bits array */
2999 94a6b54f pbrook
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3000 94a6b54f pbrook
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3001 94a6b54f pbrook
    }
3002 94a6b54f pbrook
#endif
3003 33417e70 bellard
}
3004 33417e70 bellard
3005 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3006 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3007 3ee89922 blueswir1
   2). Functions can be omitted with a NULL function pointer. The
3008 3ee89922 blueswir1
   registered functions may be modified dynamically later.
3009 3ee89922 blueswir1
   If io_index is non-zero, the corresponding io zone is
3010 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3011 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3012 4254fab8 blueswir1
   returned on error. */
3013 33417e70 bellard
int cpu_register_io_memory(int io_index,
3014 33417e70 bellard
                           CPUReadMemoryFunc **mem_read,
3015 a4193c8a bellard
                           CPUWriteMemoryFunc **mem_write,
3016 a4193c8a bellard
                           void *opaque)
3017 33417e70 bellard
{
3018 4254fab8 blueswir1
    int i, subwidth = 0;
3019 33417e70 bellard
3020 33417e70 bellard
    if (io_index <= 0) {
3021 88715657 aliguori
        io_index = get_free_io_mem_idx();
3022 88715657 aliguori
        if (io_index == -1)
3023 88715657 aliguori
            return io_index;
3024 33417e70 bellard
    } else {
3025 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3026 33417e70 bellard
            return -1;
3027 33417e70 bellard
    }
3028 b5ff1b31 bellard
3029 33417e70 bellard
    for(i = 0;i < 3; i++) {
3030 4254fab8 blueswir1
        if (!mem_read[i] || !mem_write[i])
3031 4254fab8 blueswir1
            subwidth = IO_MEM_SUBWIDTH;
3032 33417e70 bellard
        io_mem_read[io_index][i] = mem_read[i];
3033 33417e70 bellard
        io_mem_write[io_index][i] = mem_write[i];
3034 33417e70 bellard
    }
3035 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3036 4254fab8 blueswir1
    return (io_index << IO_MEM_SHIFT) | subwidth;
3037 33417e70 bellard
}
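/* Illustrative sketch (editor addition): the usual device-model pattern.
   Handlers are registered with cpu_register_io_memory() and the returned
   token is then mapped with cpu_register_physical_memory_offset().  The
   example_dev_* names, the 4 KB size and the base address are
   hypothetical. */
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                   /* read of a device register */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* write to a device register */
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL,                       /* omitted byte/word handlers get the */
    NULL,                       /* IO_MEM_SUBWIDTH treatment described above */
    example_dev_readl,
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL,
    NULL,
    example_dev_writel,
};

static void example_dev_map(void *opaque, target_phys_addr_t base)
{
    int iomemtype = cpu_register_io_memory(0, example_dev_read,
                                           example_dev_write, opaque);
    cpu_register_physical_memory_offset(base, 0x1000, iomemtype, 0);
}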
3038 61382a50 bellard
3039 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3040 88715657 aliguori
{
3041 88715657 aliguori
    int i;
3042 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3043 88715657 aliguori
3044 88715657 aliguori
    for (i=0;i < 3; i++) {
3045 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3046 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3047 88715657 aliguori
    }
3048 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3049 88715657 aliguori
    io_mem_used[io_index] = 0;
3050 88715657 aliguori
}
3051 88715657 aliguori
3052 8926b517 bellard
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3053 8926b517 bellard
{
3054 8926b517 bellard
    return io_mem_write[io_index >> IO_MEM_SHIFT];
3055 8926b517 bellard
}
3056 8926b517 bellard
3057 8926b517 bellard
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3058 8926b517 bellard
{
3059 8926b517 bellard
    return io_mem_read[io_index >> IO_MEM_SHIFT];
3060 8926b517 bellard
}
3061 8926b517 bellard
3062 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3063 e2eef170 pbrook
3064 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3065 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3066 5fafdf24 ths
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3067 13eb76e0 bellard
                            int len, int is_write)
3068 13eb76e0 bellard
{
3069 13eb76e0 bellard
    int l, flags;
3070 13eb76e0 bellard
    target_ulong page;
3071 53a5960a pbrook
    void * p;
3072 13eb76e0 bellard
3073 13eb76e0 bellard
    while (len > 0) {
3074 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3075 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3076 13eb76e0 bellard
        if (l > len)
3077 13eb76e0 bellard
            l = len;
3078 13eb76e0 bellard
        flags = page_get_flags(page);
3079 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
3080 13eb76e0 bellard
            return;
3081 13eb76e0 bellard
        if (is_write) {
3082 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3083 13eb76e0 bellard
                return;
3084 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3085 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3086 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
3087 579a97f7 bellard
                return;
3088 72fb7daa aurel32
            memcpy(p, buf, l);
3089 72fb7daa aurel32
            unlock_user(p, addr, l);
3090 13eb76e0 bellard
        } else {
3091 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3092 13eb76e0 bellard
                return;
3093 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3094 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3095 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
3096 579a97f7 bellard
                return;
3097 72fb7daa aurel32
            memcpy(buf, p, l);
3098 5b257578 aurel32
            unlock_user(p, addr, 0);
3099 13eb76e0 bellard
        }
3100 13eb76e0 bellard
        len -= l;
3101 13eb76e0 bellard
        buf += l;
3102 13eb76e0 bellard
        addr += l;
3103 13eb76e0 bellard
    }
3104 13eb76e0 bellard
}
3105 8df1cd07 bellard
3106 13eb76e0 bellard
#else
3107 5fafdf24 ths
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3108 13eb76e0 bellard
                            int len, int is_write)
3109 13eb76e0 bellard
{
3110 13eb76e0 bellard
    int l, io_index;
3111 13eb76e0 bellard
    uint8_t *ptr;
3112 13eb76e0 bellard
    uint32_t val;
3113 2e12669a bellard
    target_phys_addr_t page;
3114 2e12669a bellard
    unsigned long pd;
3115 92e873b9 bellard
    PhysPageDesc *p;
3116 3b46e624 ths
3117 13eb76e0 bellard
    while (len > 0) {
3118 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3119 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3120 13eb76e0 bellard
        if (l > len)
3121 13eb76e0 bellard
            l = len;
3122 92e873b9 bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3123 13eb76e0 bellard
        if (!p) {
3124 13eb76e0 bellard
            pd = IO_MEM_UNASSIGNED;
3125 13eb76e0 bellard
        } else {
3126 13eb76e0 bellard
            pd = p->phys_offset;
3127 13eb76e0 bellard
        }
3128 3b46e624 ths
3129 13eb76e0 bellard
        if (is_write) {
3130 3a7d929e bellard
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3131 6c2934db aurel32
                target_phys_addr_t addr1 = addr;
3132 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3133 8da3ff18 pbrook
                if (p)
3134 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3135 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
3136 6a00d601 bellard
                   potential bugs */
3137 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3138 1c213d19 bellard
                    /* 32 bit write access */
3139 c27004ec bellard
                    val = ldl_p(buf);
3140 6c2934db aurel32
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3141 13eb76e0 bellard
                    l = 4;
3142 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3143 1c213d19 bellard
                    /* 16 bit write access */
3144 c27004ec bellard
                    val = lduw_p(buf);
3145 6c2934db aurel32
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3146 13eb76e0 bellard
                    l = 2;
3147 13eb76e0 bellard
                } else {
3148 1c213d19 bellard
                    /* 8 bit write access */
3149 c27004ec bellard
                    val = ldub_p(buf);
3150 6c2934db aurel32
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3151 13eb76e0 bellard
                    l = 1;
3152 13eb76e0 bellard
                }
3153 13eb76e0 bellard
            } else {
3154 b448f2f3 bellard
                unsigned long addr1;
3155 b448f2f3 bellard
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3156 13eb76e0 bellard
                /* RAM case */
3157 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
3158 13eb76e0 bellard
                memcpy(ptr, buf, l);
3159 3a7d929e bellard
                if (!cpu_physical_memory_is_dirty(addr1)) {
3160 3a7d929e bellard
                    /* invalidate code */
3161 3a7d929e bellard
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3162 3a7d929e bellard
                    /* set dirty bit */
3163 5fafdf24 ths
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3164 f23db169 bellard
                        (0xff & ~CODE_DIRTY_FLAG);
3165 3a7d929e bellard
                }
3166 13eb76e0 bellard
            }
3167 13eb76e0 bellard
        } else {
3168 5fafdf24 ths
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3169 2a4188a3 bellard
                !(pd & IO_MEM_ROMD)) {
3170 6c2934db aurel32
                target_phys_addr_t addr1 = addr;
3171 13eb76e0 bellard
                /* I/O case */
3172 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3173 8da3ff18 pbrook
                if (p)
3174 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3175 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3176 13eb76e0 bellard
                    /* 32 bit read access */
3177 6c2934db aurel32
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3178 c27004ec bellard
                    stl_p(buf, val);
3179 13eb76e0 bellard
                    l = 4;
3180 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3181 13eb76e0 bellard
                    /* 16 bit read access */
3182 6c2934db aurel32
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3183 c27004ec bellard
                    stw_p(buf, val);
3184 13eb76e0 bellard
                    l = 2;
3185 13eb76e0 bellard
                } else {
3186 1c213d19 bellard
                    /* 8 bit read access */
3187 6c2934db aurel32
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3188 c27004ec bellard
                    stb_p(buf, val);
3189 13eb76e0 bellard
                    l = 1;
3190 13eb76e0 bellard
                }
3191 13eb76e0 bellard
            } else {
3192 13eb76e0 bellard
                /* RAM case */
3193 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3194 13eb76e0 bellard
                    (addr & ~TARGET_PAGE_MASK);
3195 13eb76e0 bellard
                memcpy(buf, ptr, l);
3196 13eb76e0 bellard
            }
3197 13eb76e0 bellard
        }
3198 13eb76e0 bellard
        len -= l;
3199 13eb76e0 bellard
        buf += l;
3200 13eb76e0 bellard
        addr += l;
3201 13eb76e0 bellard
    }
3202 13eb76e0 bellard
}
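/* Illustrative sketch (editor addition): a typical debug-style use of
   cpu_physical_memory_rw() - read four bytes of guest physical memory and
   decode them in guest byte order.  example_peek_phys_u32() is a
   hypothetical helper. */
static uint32_t example_peek_phys_u32(target_phys_addr_t addr)
{
    uint8_t buf[4];

    cpu_physical_memory_rw(addr, buf, sizeof(buf), 0);  /* is_write = 0 */
    return ldl_p(buf);                                   /* guest byte order */
}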
3203 8df1cd07 bellard
3204 d0ecd2aa bellard
/* used for ROM loading: can write to both RAM and ROM */
3205 5fafdf24 ths
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3206 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3207 d0ecd2aa bellard
{
3208 d0ecd2aa bellard
    int l;
3209 d0ecd2aa bellard
    uint8_t *ptr;
3210 d0ecd2aa bellard
    target_phys_addr_t page;
3211 d0ecd2aa bellard
    unsigned long pd;
3212 d0ecd2aa bellard
    PhysPageDesc *p;
3213 3b46e624 ths
3214 d0ecd2aa bellard
    while (len > 0) {
3215 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3216 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3217 d0ecd2aa bellard
        if (l > len)
3218 d0ecd2aa bellard
            l = len;
3219 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3220 d0ecd2aa bellard
        if (!p) {
3221 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3222 d0ecd2aa bellard
        } else {
3223 d0ecd2aa bellard
            pd = p->phys_offset;
3224 d0ecd2aa bellard
        }
3225 3b46e624 ths
3226 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3227 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3228 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3229 d0ecd2aa bellard
            /* do nothing */
3230 d0ecd2aa bellard
        } else {
3231 d0ecd2aa bellard
            unsigned long addr1;
3232 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3233 d0ecd2aa bellard
            /* ROM/RAM case */
3234 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3235 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3236 d0ecd2aa bellard
        }
3237 d0ecd2aa bellard
        len -= l;
3238 d0ecd2aa bellard
        buf += l;
3239 d0ecd2aa bellard
        addr += l;
3240 d0ecd2aa bellard
    }
3241 d0ecd2aa bellard
}
3242 d0ecd2aa bellard
3243 6d16c2f8 aliguori
typedef struct {
3244 6d16c2f8 aliguori
    void *buffer;
3245 6d16c2f8 aliguori
    target_phys_addr_t addr;
3246 6d16c2f8 aliguori
    target_phys_addr_t len;
3247 6d16c2f8 aliguori
} BounceBuffer;
3248 6d16c2f8 aliguori
3249 6d16c2f8 aliguori
static BounceBuffer bounce;
3250 6d16c2f8 aliguori
3251 ba223c29 aliguori
typedef struct MapClient {
3252 ba223c29 aliguori
    void *opaque;
3253 ba223c29 aliguori
    void (*callback)(void *opaque);
3254 ba223c29 aliguori
    LIST_ENTRY(MapClient) link;
3255 ba223c29 aliguori
} MapClient;
3256 ba223c29 aliguori
3257 ba223c29 aliguori
static LIST_HEAD(map_client_list, MapClient) map_client_list
3258 ba223c29 aliguori
    = LIST_HEAD_INITIALIZER(map_client_list);
3259 ba223c29 aliguori
3260 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3261 ba223c29 aliguori
{
3262 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3263 ba223c29 aliguori
3264 ba223c29 aliguori
    client->opaque = opaque;
3265 ba223c29 aliguori
    client->callback = callback;
3266 ba223c29 aliguori
    LIST_INSERT_HEAD(&map_client_list, client, link);
3267 ba223c29 aliguori
    return client;
3268 ba223c29 aliguori
}
3269 ba223c29 aliguori
3270 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3271 ba223c29 aliguori
{
3272 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3273 ba223c29 aliguori
3274 ba223c29 aliguori
    LIST_REMOVE(client, link);
3275 ba223c29 aliguori
}
3276 ba223c29 aliguori
3277 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3278 ba223c29 aliguori
{
3279 ba223c29 aliguori
    MapClient *client;
3280 ba223c29 aliguori
3281 ba223c29 aliguori
    while (!LIST_EMPTY(&map_client_list)) {
3282 ba223c29 aliguori
        client = LIST_FIRST(&map_client_list);
3283 ba223c29 aliguori
        client->callback(client->opaque);
3284 ba223c29 aliguori
        LIST_REMOVE(client, link);
3285 ba223c29 aliguori
    }
3286 ba223c29 aliguori
}
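/* A minimal sketch of the retry protocol (hypothetical caller; retry_dma()
   and its opaque request pointer are illustrative names): when
   cpu_physical_memory_map() returns NULL because the single bounce buffer
   is busy, the caller can register a callback that fires once
   cpu_physical_memory_unmap() releases the buffer, and retry from there.

       static void retry_dma(void *opaque)
       {
           restart the transfer, which calls cpu_physical_memory_map() again
       }

       if (!host) {
           void *handle = cpu_register_map_client(request, retry_dma);
           keep handle so cpu_unregister_map_client(handle) can be called
           if the transfer is cancelled before the callback runs
       }
*/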
3287 ba223c29 aliguori
3288 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3289 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3290 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3291 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3292 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
3293 ba223c29 aliguori
 * likely to succeed.
3294 6d16c2f8 aliguori
 */
3295 6d16c2f8 aliguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
3296 6d16c2f8 aliguori
                              target_phys_addr_t *plen,
3297 6d16c2f8 aliguori
                              int is_write)
3298 6d16c2f8 aliguori
{
3299 6d16c2f8 aliguori
    target_phys_addr_t len = *plen;
3300 6d16c2f8 aliguori
    target_phys_addr_t done = 0;
3301 6d16c2f8 aliguori
    int l;
3302 6d16c2f8 aliguori
    uint8_t *ret = NULL;
3303 6d16c2f8 aliguori
    uint8_t *ptr;
3304 6d16c2f8 aliguori
    target_phys_addr_t page;
3305 6d16c2f8 aliguori
    unsigned long pd;
3306 6d16c2f8 aliguori
    PhysPageDesc *p;
3307 6d16c2f8 aliguori
    unsigned long addr1;
3308 6d16c2f8 aliguori
3309 6d16c2f8 aliguori
    while (len > 0) {
3310 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
3311 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
3312 6d16c2f8 aliguori
        if (l > len)
3313 6d16c2f8 aliguori
            l = len;
3314 6d16c2f8 aliguori
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3315 6d16c2f8 aliguori
        if (!p) {
3316 6d16c2f8 aliguori
            pd = IO_MEM_UNASSIGNED;
3317 6d16c2f8 aliguori
        } else {
3318 6d16c2f8 aliguori
            pd = p->phys_offset;
3319 6d16c2f8 aliguori
        }
3320 6d16c2f8 aliguori
3321 6d16c2f8 aliguori
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3322 6d16c2f8 aliguori
            if (done || bounce.buffer) {
3323 6d16c2f8 aliguori
                break;
3324 6d16c2f8 aliguori
            }
3325 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3326 6d16c2f8 aliguori
            bounce.addr = addr;
3327 6d16c2f8 aliguori
            bounce.len = l;
3328 6d16c2f8 aliguori
            if (!is_write) {
3329 6d16c2f8 aliguori
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3330 6d16c2f8 aliguori
            }
3331 6d16c2f8 aliguori
            ptr = bounce.buffer;
3332 6d16c2f8 aliguori
        } else {
3333 6d16c2f8 aliguori
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3334 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3335 6d16c2f8 aliguori
        }
3336 6d16c2f8 aliguori
        if (!done) {
3337 6d16c2f8 aliguori
            ret = ptr;
3338 6d16c2f8 aliguori
        } else if (ret + done != ptr) {
3339 6d16c2f8 aliguori
            break;
3340 6d16c2f8 aliguori
        }
3341 6d16c2f8 aliguori
3342 6d16c2f8 aliguori
        len -= l;
3343 6d16c2f8 aliguori
        addr += l;
3344 6d16c2f8 aliguori
        done += l;
3345 6d16c2f8 aliguori
    }
3346 6d16c2f8 aliguori
    *plen = done;
3347 6d16c2f8 aliguori
    return ret;
3348 6d16c2f8 aliguori
}
3349 6d16c2f8 aliguori
3350 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3351 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3352 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
3353 6d16c2f8 aliguori
 */
3354 6d16c2f8 aliguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3355 6d16c2f8 aliguori
                               int is_write, target_phys_addr_t access_len)
3356 6d16c2f8 aliguori
{
3357 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
3358 6d16c2f8 aliguori
        if (is_write) {
3359 5579c7f3 pbrook
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3360 6d16c2f8 aliguori
            while (access_len) {
3361 6d16c2f8 aliguori
                unsigned l;
3362 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
3363 6d16c2f8 aliguori
                if (l > access_len)
3364 6d16c2f8 aliguori
                    l = access_len;
3365 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
3366 6d16c2f8 aliguori
                    /* invalidate code */
3367 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3368 6d16c2f8 aliguori
                    /* set dirty bit */
3369 6d16c2f8 aliguori
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3370 6d16c2f8 aliguori
                        (0xff & ~CODE_DIRTY_FLAG);
3371 6d16c2f8 aliguori
                }
3372 6d16c2f8 aliguori
                addr1 += l;
3373 6d16c2f8 aliguori
                access_len -= l;
3374 6d16c2f8 aliguori
            }
3375 6d16c2f8 aliguori
        }
3376 6d16c2f8 aliguori
        return;
3377 6d16c2f8 aliguori
    }
3378 6d16c2f8 aliguori
    if (is_write) {
3379 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3380 6d16c2f8 aliguori
    }
3381 6d16c2f8 aliguori
    qemu_free(bounce.buffer);
3382 6d16c2f8 aliguori
    bounce.buffer = NULL;
3383 ba223c29 aliguori
    cpu_notify_map_clients();
3384 6d16c2f8 aliguori
}
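/* A minimal usage sketch (hypothetical caller; gpa, data and size are
   illustrative names): the expected pattern is map, access the returned
   host pointer directly, then unmap, falling back to
   cpu_physical_memory_rw() when no direct mapping could be obtained.

       target_phys_addr_t mapped = size;
       uint8_t *host = cpu_physical_memory_map(gpa, &mapped, 1);
       if (host) {
           memcpy(host, data, mapped);
           cpu_physical_memory_unmap(host, mapped, 1, mapped);
       } else {
           cpu_physical_memory_rw(gpa, data, size, 1);
       }
*/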
3385 d0ecd2aa bellard
3386 8df1cd07 bellard
/* warning: addr must be aligned */
3387 8df1cd07 bellard
uint32_t ldl_phys(target_phys_addr_t addr)
3388 8df1cd07 bellard
{
3389 8df1cd07 bellard
    int io_index;
3390 8df1cd07 bellard
    uint8_t *ptr;
3391 8df1cd07 bellard
    uint32_t val;
3392 8df1cd07 bellard
    unsigned long pd;
3393 8df1cd07 bellard
    PhysPageDesc *p;
3394 8df1cd07 bellard
3395 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3396 8df1cd07 bellard
    if (!p) {
3397 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3398 8df1cd07 bellard
    } else {
3399 8df1cd07 bellard
        pd = p->phys_offset;
3400 8df1cd07 bellard
    }
3401 3b46e624 ths
3402 5fafdf24 ths
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3403 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3404 8df1cd07 bellard
        /* I/O case */
3405 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3406 8da3ff18 pbrook
        if (p)
3407 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3408 8df1cd07 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3409 8df1cd07 bellard
    } else {
3410 8df1cd07 bellard
        /* RAM case */
3411 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3412 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
3413 8df1cd07 bellard
        val = ldl_p(ptr);
3414 8df1cd07 bellard
    }
3415 8df1cd07 bellard
    return val;
3416 8df1cd07 bellard
}
3417 8df1cd07 bellard
3418 84b7b8e7 bellard
/* warning: addr must be aligned */
3419 84b7b8e7 bellard
uint64_t ldq_phys(target_phys_addr_t addr)
3420 84b7b8e7 bellard
{
3421 84b7b8e7 bellard
    int io_index;
3422 84b7b8e7 bellard
    uint8_t *ptr;
3423 84b7b8e7 bellard
    uint64_t val;
3424 84b7b8e7 bellard
    unsigned long pd;
3425 84b7b8e7 bellard
    PhysPageDesc *p;
3426 84b7b8e7 bellard
3427 84b7b8e7 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3428 84b7b8e7 bellard
    if (!p) {
3429 84b7b8e7 bellard
        pd = IO_MEM_UNASSIGNED;
3430 84b7b8e7 bellard
    } else {
3431 84b7b8e7 bellard
        pd = p->phys_offset;
3432 84b7b8e7 bellard
    }
3433 3b46e624 ths
3434 2a4188a3 bellard
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3435 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3436 84b7b8e7 bellard
        /* I/O case */
3437 84b7b8e7 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3438 8da3ff18 pbrook
        if (p)
3439 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3440 84b7b8e7 bellard
#ifdef TARGET_WORDS_BIGENDIAN
3441 84b7b8e7 bellard
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3442 84b7b8e7 bellard
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3443 84b7b8e7 bellard
#else
3444 84b7b8e7 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3445 84b7b8e7 bellard
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3446 84b7b8e7 bellard
#endif
3447 84b7b8e7 bellard
    } else {
3448 84b7b8e7 bellard
        /* RAM case */
3449 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3450 84b7b8e7 bellard
            (addr & ~TARGET_PAGE_MASK);
3451 84b7b8e7 bellard
        val = ldq_p(ptr);
3452 84b7b8e7 bellard
    }
3453 84b7b8e7 bellard
    return val;
3454 84b7b8e7 bellard
}
3455 84b7b8e7 bellard
3456 aab33094 bellard
/* XXX: optimize */
3457 aab33094 bellard
uint32_t ldub_phys(target_phys_addr_t addr)
3458 aab33094 bellard
{
3459 aab33094 bellard
    uint8_t val;
3460 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3461 aab33094 bellard
    return val;
3462 aab33094 bellard
}
3463 aab33094 bellard
3464 aab33094 bellard
/* XXX: optimize */
3465 aab33094 bellard
uint32_t lduw_phys(target_phys_addr_t addr)
3466 aab33094 bellard
{
3467 aab33094 bellard
    uint16_t val;
3468 aab33094 bellard
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3469 aab33094 bellard
    return tswap16(val);
3470 aab33094 bellard
}
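/* A minimal sketch (desc_addr is an illustrative name): the ld*_phys
   accessors return guest-physical memory contents in target byte order,
   routing accesses that hit MMIO pages through the registered I/O read
   callbacks.

       uint64_t desc = ldq_phys(desc_addr);
       uint16_t tag  = lduw_phys(desc_addr + 8);
*/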
3471 aab33094 bellard
3472 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not marked as dirty
3473 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
3474 8df1cd07 bellard
   bits are used to track modified PTEs */
3475 8df1cd07 bellard
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3476 8df1cd07 bellard
{
3477 8df1cd07 bellard
    int io_index;
3478 8df1cd07 bellard
    uint8_t *ptr;
3479 8df1cd07 bellard
    unsigned long pd;
3480 8df1cd07 bellard
    PhysPageDesc *p;
3481 8df1cd07 bellard
3482 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3483 8df1cd07 bellard
    if (!p) {
3484 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3485 8df1cd07 bellard
    } else {
3486 8df1cd07 bellard
        pd = p->phys_offset;
3487 8df1cd07 bellard
    }
3488 3b46e624 ths
3489 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3490 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3491 8da3ff18 pbrook
        if (p)
3492 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3493 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3494 8df1cd07 bellard
    } else {
3495 74576198 aliguori
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3496 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3497 8df1cd07 bellard
        stl_p(ptr, val);
3498 74576198 aliguori
3499 74576198 aliguori
        if (unlikely(in_migration)) {
3500 74576198 aliguori
            if (!cpu_physical_memory_is_dirty(addr1)) {
3501 74576198 aliguori
                /* invalidate code */
3502 74576198 aliguori
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3503 74576198 aliguori
                /* set dirty bit */
3504 74576198 aliguori
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3505 74576198 aliguori
                    (0xff & ~CODE_DIRTY_FLAG);
3506 74576198 aliguori
            }
3507 74576198 aliguori
        }
3508 8df1cd07 bellard
    }
3509 8df1cd07 bellard
}
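/* A minimal sketch of the intended use (hypothetical caller; pte_addr and
   PTE_ACCESSED are illustrative names): a software page-table walker can
   update an accessed/dirty flag in a guest PTE without marking the RAM
   page dirty or invalidating translated code on it.

       uint32_t pte = ldl_phys(pte_addr);
       if (!(pte & PTE_ACCESSED)) {
           stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
       }
*/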
3510 8df1cd07 bellard
3511 bc98a7ef j_mayer
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3512 bc98a7ef j_mayer
{
3513 bc98a7ef j_mayer
    int io_index;
3514 bc98a7ef j_mayer
    uint8_t *ptr;
3515 bc98a7ef j_mayer
    unsigned long pd;
3516 bc98a7ef j_mayer
    PhysPageDesc *p;
3517 bc98a7ef j_mayer
3518 bc98a7ef j_mayer
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3519 bc98a7ef j_mayer
    if (!p) {
3520 bc98a7ef j_mayer
        pd = IO_MEM_UNASSIGNED;
3521 bc98a7ef j_mayer
    } else {
3522 bc98a7ef j_mayer
        pd = p->phys_offset;
3523 bc98a7ef j_mayer
    }
3524 3b46e624 ths
3525 bc98a7ef j_mayer
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3526 bc98a7ef j_mayer
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3527 8da3ff18 pbrook
        if (p)
3528 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3529 bc98a7ef j_mayer
#ifdef TARGET_WORDS_BIGENDIAN
3530 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3531 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3532 bc98a7ef j_mayer
#else
3533 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3534 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3535 bc98a7ef j_mayer
#endif
3536 bc98a7ef j_mayer
    } else {
3537 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3538 bc98a7ef j_mayer
            (addr & ~TARGET_PAGE_MASK);
3539 bc98a7ef j_mayer
        stq_p(ptr, val);
3540 bc98a7ef j_mayer
    }
3541 bc98a7ef j_mayer
}
3542 bc98a7ef j_mayer
3543 8df1cd07 bellard
/* warning: addr must be aligned */
3544 8df1cd07 bellard
void stl_phys(target_phys_addr_t addr, uint32_t val)
3545 8df1cd07 bellard
{
3546 8df1cd07 bellard
    int io_index;
3547 8df1cd07 bellard
    uint8_t *ptr;
3548 8df1cd07 bellard
    unsigned long pd;
3549 8df1cd07 bellard
    PhysPageDesc *p;
3550 8df1cd07 bellard
3551 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3552 8df1cd07 bellard
    if (!p) {
3553 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3554 8df1cd07 bellard
    } else {
3555 8df1cd07 bellard
        pd = p->phys_offset;
3556 8df1cd07 bellard
    }
3557 3b46e624 ths
3558 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3559 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3560 8da3ff18 pbrook
        if (p)
3561 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3562 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3563 8df1cd07 bellard
    } else {
3564 8df1cd07 bellard
        unsigned long addr1;
3565 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3566 8df1cd07 bellard
        /* RAM case */
3567 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3568 8df1cd07 bellard
        stl_p(ptr, val);
3569 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
3570 3a7d929e bellard
            /* invalidate code */
3571 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3572 3a7d929e bellard
            /* set dirty bit */
3573 f23db169 bellard
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3574 f23db169 bellard
                (0xff & ~CODE_DIRTY_FLAG);
3575 3a7d929e bellard
        }
3576 8df1cd07 bellard
    }
3577 8df1cd07 bellard
}
3578 8df1cd07 bellard
3579 aab33094 bellard
/* XXX: optimize */
3580 aab33094 bellard
void stb_phys(target_phys_addr_t addr, uint32_t val)
3581 aab33094 bellard
{
3582 aab33094 bellard
    uint8_t v = val;
3583 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3584 aab33094 bellard
}
3585 aab33094 bellard
3586 aab33094 bellard
/* XXX: optimize */
3587 aab33094 bellard
void stw_phys(target_phys_addr_t addr, uint32_t val)
3588 aab33094 bellard
{
3589 aab33094 bellard
    uint16_t v = tswap16(val);
3590 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3591 aab33094 bellard
}
3592 aab33094 bellard
3593 aab33094 bellard
/* XXX: optimize */
3594 aab33094 bellard
void stq_phys(target_phys_addr_t addr, uint64_t val)
3595 aab33094 bellard
{
3596 aab33094 bellard
    val = tswap64(val);
3597 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3598 aab33094 bellard
}
3599 aab33094 bellard
3600 13eb76e0 bellard
#endif
3601 13eb76e0 bellard
3602 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
3603 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3604 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
3605 13eb76e0 bellard
{
3606 13eb76e0 bellard
    int l;
3607 9b3c35e0 j_mayer
    target_phys_addr_t phys_addr;
3608 9b3c35e0 j_mayer
    target_ulong page;
3609 13eb76e0 bellard
3610 13eb76e0 bellard
    while (len > 0) {
3611 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3612 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
3613 13eb76e0 bellard
        /* if no physical page mapped, return an error */
3614 13eb76e0 bellard
        if (phys_addr == -1)
3615 13eb76e0 bellard
            return -1;
3616 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3617 13eb76e0 bellard
        if (l > len)
3618 13eb76e0 bellard
            l = len;
3619 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
3620 5e2972fd aliguori
#if !defined(CONFIG_USER_ONLY)
3621 5e2972fd aliguori
        if (is_write)
3622 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
3623 5e2972fd aliguori
        else
3624 5e2972fd aliguori
#endif
3625 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3626 13eb76e0 bellard
        len -= l;
3627 13eb76e0 bellard
        buf += l;
3628 13eb76e0 bellard
        addr += l;
3629 13eb76e0 bellard
    }
3630 13eb76e0 bellard
    return 0;
3631 13eb76e0 bellard
}
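/* A minimal sketch (env, vaddr and buf are illustrative names): this is
   the accessor a debugger front end such as the gdb stub goes through to
   read or patch guest memory by virtual address; each page is translated
   with cpu_get_phys_page_debug() and the call fails if a page is unmapped.

       uint8_t buf[4];
       if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
           the page is unmapped; report an error to the debugger
       }
*/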
3632 13eb76e0 bellard
3633 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
3634 2e70f6ef pbrook
   must be at the end of the TB */
3635 2e70f6ef pbrook
void cpu_io_recompile(CPUState *env, void *retaddr)
3636 2e70f6ef pbrook
{
3637 2e70f6ef pbrook
    TranslationBlock *tb;
3638 2e70f6ef pbrook
    uint32_t n, cflags;
3639 2e70f6ef pbrook
    target_ulong pc, cs_base;
3640 2e70f6ef pbrook
    uint64_t flags;
3641 2e70f6ef pbrook
3642 2e70f6ef pbrook
    tb = tb_find_pc((unsigned long)retaddr);
3643 2e70f6ef pbrook
    if (!tb) {
3644 2e70f6ef pbrook
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
3645 2e70f6ef pbrook
                  retaddr);
3646 2e70f6ef pbrook
    }
3647 2e70f6ef pbrook
    n = env->icount_decr.u16.low + tb->icount;
3648 2e70f6ef pbrook
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3649 2e70f6ef pbrook
    /* Calculate how many instructions had been executed before the fault
3650 bf20dc07 ths
       occurred.  */
3651 2e70f6ef pbrook
    n = n - env->icount_decr.u16.low;
3652 2e70f6ef pbrook
    /* Generate a new TB ending on the I/O insn.  */
3653 2e70f6ef pbrook
    n++;
3654 2e70f6ef pbrook
    /* On MIPS and SH, delay slot instructions can only be restarted if
3655 2e70f6ef pbrook
       they were already the first instruction in the TB.  If this is not
3656 bf20dc07 ths
       the first instruction in a TB then re-execute the preceding
3657 2e70f6ef pbrook
       branch.  */
3658 2e70f6ef pbrook
#if defined(TARGET_MIPS)
3659 2e70f6ef pbrook
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3660 2e70f6ef pbrook
        env->active_tc.PC -= 4;
3661 2e70f6ef pbrook
        env->icount_decr.u16.low++;
3662 2e70f6ef pbrook
        env->hflags &= ~MIPS_HFLAG_BMASK;
3663 2e70f6ef pbrook
    }
3664 2e70f6ef pbrook
#elif defined(TARGET_SH4)
3665 2e70f6ef pbrook
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3666 2e70f6ef pbrook
            && n > 1) {
3667 2e70f6ef pbrook
        env->pc -= 2;
3668 2e70f6ef pbrook
        env->icount_decr.u16.low++;
3669 2e70f6ef pbrook
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3670 2e70f6ef pbrook
    }
3671 2e70f6ef pbrook
#endif
3672 2e70f6ef pbrook
    /* This should never happen.  */
3673 2e70f6ef pbrook
    if (n > CF_COUNT_MASK)
3674 2e70f6ef pbrook
        cpu_abort(env, "TB too big during recompile");
3675 2e70f6ef pbrook
3676 2e70f6ef pbrook
    cflags = n | CF_LAST_IO;
3677 2e70f6ef pbrook
    pc = tb->pc;
3678 2e70f6ef pbrook
    cs_base = tb->cs_base;
3679 2e70f6ef pbrook
    flags = tb->flags;
3680 2e70f6ef pbrook
    tb_phys_invalidate(tb, -1);
3681 2e70f6ef pbrook
    /* FIXME: In theory this could raise an exception.  In practice
3682 2e70f6ef pbrook
       we have already translated the block once so it's probably ok.  */
3683 2e70f6ef pbrook
    tb_gen_code(env, pc, cs_base, flags, cflags);
3684 bf20dc07 ths
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3685 2e70f6ef pbrook
       the first in the TB) then we end up generating a whole new TB and
3686 2e70f6ef pbrook
       repeating the fault, which is horribly inefficient.
3687 2e70f6ef pbrook
       Better would be to execute just this insn uncached, or generate a
3688 2e70f6ef pbrook
       second new TB.  */
3689 2e70f6ef pbrook
    cpu_resume_from_signal(env, NULL);
3690 2e70f6ef pbrook
}
3691 2e70f6ef pbrook
3692 e3db7226 bellard
void dump_exec_info(FILE *f,
3693 e3db7226 bellard
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3694 e3db7226 bellard
{
3695 e3db7226 bellard
    int i, target_code_size, max_target_code_size;
3696 e3db7226 bellard
    int direct_jmp_count, direct_jmp2_count, cross_page;
3697 e3db7226 bellard
    TranslationBlock *tb;
3698 3b46e624 ths
3699 e3db7226 bellard
    target_code_size = 0;
3700 e3db7226 bellard
    max_target_code_size = 0;
3701 e3db7226 bellard
    cross_page = 0;
3702 e3db7226 bellard
    direct_jmp_count = 0;
3703 e3db7226 bellard
    direct_jmp2_count = 0;
3704 e3db7226 bellard
    for(i = 0; i < nb_tbs; i++) {
3705 e3db7226 bellard
        tb = &tbs[i];
3706 e3db7226 bellard
        target_code_size += tb->size;
3707 e3db7226 bellard
        if (tb->size > max_target_code_size)
3708 e3db7226 bellard
            max_target_code_size = tb->size;
3709 e3db7226 bellard
        if (tb->page_addr[1] != -1)
3710 e3db7226 bellard
            cross_page++;
3711 e3db7226 bellard
        if (tb->tb_next_offset[0] != 0xffff) {
3712 e3db7226 bellard
            direct_jmp_count++;
3713 e3db7226 bellard
            if (tb->tb_next_offset[1] != 0xffff) {
3714 e3db7226 bellard
                direct_jmp2_count++;
3715 e3db7226 bellard
            }
3716 e3db7226 bellard
        }
3717 e3db7226 bellard
    }
3718 e3db7226 bellard
    /* XXX: avoid using doubles ? */
3719 57fec1fe bellard
    cpu_fprintf(f, "Translation buffer state:\n");
3720 26a5f13b bellard
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
3721 26a5f13b bellard
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3722 26a5f13b bellard
    cpu_fprintf(f, "TB count            %d/%d\n", 
3723 26a5f13b bellard
                nb_tbs, code_gen_max_blocks);
3724 5fafdf24 ths
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3725 e3db7226 bellard
                nb_tbs ? target_code_size / nb_tbs : 0,
3726 e3db7226 bellard
                max_target_code_size);
3727 5fafdf24 ths
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3728 e3db7226 bellard
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3729 e3db7226 bellard
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3730 5fafdf24 ths
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3731 5fafdf24 ths
            cross_page,
3732 e3db7226 bellard
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3733 e3db7226 bellard
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3734 5fafdf24 ths
                direct_jmp_count,
3735 e3db7226 bellard
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3736 e3db7226 bellard
                direct_jmp2_count,
3737 e3db7226 bellard
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3738 57fec1fe bellard
    cpu_fprintf(f, "\nStatistics:\n");
3739 e3db7226 bellard
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3740 e3db7226 bellard
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3741 e3db7226 bellard
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3742 b67d9a52 bellard
    tcg_dump_info(f, cpu_fprintf);
3743 e3db7226 bellard
}
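/* A minimal invocation sketch: dump_exec_info() takes an fprintf-style
   callback so the same code can print either to a stdio stream or to a
   monitor; the simplest call writes the JIT statistics to stderr.

       dump_exec_info(stderr, fprintf);
*/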
3744 e3db7226 bellard
3745 5fafdf24 ths
#if !defined(CONFIG_USER_ONLY)
3746 61382a50 bellard
3747 61382a50 bellard
#define MMUSUFFIX _cmmu
3748 61382a50 bellard
#define GETPC() NULL
3749 61382a50 bellard
#define env cpu_single_env
3750 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
3751 61382a50 bellard
3752 61382a50 bellard
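/* The includes below instantiate the "_cmmu" (code access) soft-MMU
   helpers once per access size: SHIFT is the log2 of the access width,
   so 0, 1, 2 and 3 produce the 1-, 2-, 4- and 8-byte variants. */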
#define SHIFT 0
3753 61382a50 bellard
#include "softmmu_template.h"
3754 61382a50 bellard
3755 61382a50 bellard
#define SHIFT 1
3756 61382a50 bellard
#include "softmmu_template.h"
3757 61382a50 bellard
3758 61382a50 bellard
#define SHIFT 2
3759 61382a50 bellard
#include "softmmu_template.h"
3760 61382a50 bellard
3761 61382a50 bellard
#define SHIFT 3
3762 61382a50 bellard
#include "softmmu_template.h"
3763 61382a50 bellard
3764 61382a50 bellard
#undef env
3765 61382a50 bellard
3766 61382a50 bellard
#endif