/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
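/* Note (added for clarity): the page tables below are two-level.  The upper
   L1_BITS of a page index select an entry in l1_map (or l1_phys_map), and
   the lower L2_BITS select a PageDesc (or PhysPageDesc) inside a
   second-level array that is only allocated on demand; see
   page_find_alloc() and phys_page_find_alloc() below. */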

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
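/* Sizing note for code_gen_alloc() above: code_gen_buffer_max_size is the
   buffer size minus the worst-case size of one translated block, so a
   request accepted by tb_alloc() below can always be generated without
   overflowing the buffer; code_gen_max_blocks only bounds the tbs[] array
   and is derived from an average block size. */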

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
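/* Note (added for clarity): in the lists walked by the helpers below, the
   two low bits of a TranslationBlock pointer are used as a tag.  In the
   page_next[] lists they encode which of the TB's (up to two) pages the
   link belongs to, and in the jmp_first/jmp_next circular list the value 2
   marks the list head, which is why the loops mask with ~3 and stop on
   n1 == 2. */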

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
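/* Note (added for clarity): p->code_bitmap has one bit per byte of the
   page, set for every byte covered by at least one TB.  It is built lazily
   once a page has seen SMC_BITMAP_USE_THRESHOLD code writes, and lets
   tb_invalidate_phys_page_fast() skip the expensive range invalidation
   when a write does not touch any translated code. */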

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
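/* Note on cflags (added for clarity): the value is stored verbatim in
   tb->cflags, and its low CF_COUNT_MASK bits bound the number of guest
   instructions the translator puts in the block.  The self-modifying-code
   handlers below pass 1 so that the regenerated TB contains just the
   instruction performing the write. */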

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
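/* Note on the TARGET_HAS_PRECISE_SMC path above (added for clarity): when
   the write that triggered the invalidation came from the TB currently
   executing, the CPU state is first rolled back to the faulting
   instruction with cpu_restore_state(), the TB is regenerated as a
   single-instruction block, and execution restarts via
   cpu_resume_from_signal() so the write completes without re-entering the
   now-invalid code. */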
1000 fd6ce8f6 bellard
1001 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1002 00f82b8a aurel32
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1003 9fa3e853 bellard
{
1004 9fa3e853 bellard
    PageDesc *p;
1005 9fa3e853 bellard
    int offset, b;
1006 59817ccb bellard
#if 0
1007 a4193c8a bellard
    if (1) {
1008 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1009 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1010 93fcfe39 aliguori
                  cpu_single_env->eip,
1011 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1012 59817ccb bellard
    }
1013 59817ccb bellard
#endif
1014 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1015 5fafdf24 ths
    if (!p)
1016 9fa3e853 bellard
        return;
1017 9fa3e853 bellard
    if (p->code_bitmap) {
1018 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1019 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1020 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1021 9fa3e853 bellard
            goto do_invalidate;
1022 9fa3e853 bellard
    } else {
1023 9fa3e853 bellard
    do_invalidate:
1024 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1025 9fa3e853 bellard
    }
1026 9fa3e853 bellard
}
1027 9fa3e853 bellard
1028 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1029 00f82b8a aurel32
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1030 d720b93d bellard
                                    unsigned long pc, void *puc)
1031 9fa3e853 bellard
{
1032 6b917547 aliguori
    TranslationBlock *tb;
1033 9fa3e853 bellard
    PageDesc *p;
1034 6b917547 aliguori
    int n;
1035 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1036 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1037 d720b93d bellard
    CPUState *env = cpu_single_env;
1038 6b917547 aliguori
    int current_tb_modified = 0;
1039 6b917547 aliguori
    target_ulong current_pc = 0;
1040 6b917547 aliguori
    target_ulong current_cs_base = 0;
1041 6b917547 aliguori
    int current_flags = 0;
1042 d720b93d bellard
#endif
1043 9fa3e853 bellard
1044 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1045 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1046 5fafdf24 ths
    if (!p)
1047 9fa3e853 bellard
        return;
1048 9fa3e853 bellard
    tb = p->first_tb;
1049 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1050 d720b93d bellard
    if (tb && pc != 0) {
1051 d720b93d bellard
        current_tb = tb_find_pc(pc);
1052 d720b93d bellard
    }
1053 d720b93d bellard
#endif
1054 9fa3e853 bellard
    while (tb != NULL) {
1055 9fa3e853 bellard
        n = (long)tb & 3;
1056 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1057 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1058 d720b93d bellard
        if (current_tb == tb &&
1059 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1060 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1061 d720b93d bellard
                   its execution. We could be more precise by checking
1062 d720b93d bellard
                   that the modification is after the current PC, but it
1063 d720b93d bellard
                   would require a specialized function to partially
1064 d720b93d bellard
                   restore the CPU state */
1065 3b46e624 ths
1066 d720b93d bellard
            current_tb_modified = 1;
1067 d720b93d bellard
            cpu_restore_state(current_tb, env, pc, puc);
1068 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1069 6b917547 aliguori
                                 &current_flags);
1070 d720b93d bellard
        }
1071 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1072 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1073 9fa3e853 bellard
        tb = tb->page_next[n];
1074 9fa3e853 bellard
    }
1075 fd6ce8f6 bellard
    p->first_tb = NULL;
1076 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1077 d720b93d bellard
    if (current_tb_modified) {
1078 d720b93d bellard
        /* we generate a block containing just the instruction
1079 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1080 d720b93d bellard
           itself */
1081 ea1c1802 bellard
        env->current_tb = NULL;
1082 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1083 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1084 d720b93d bellard
    }
1085 d720b93d bellard
#endif
1086 fd6ce8f6 bellard
}
1087 9fa3e853 bellard
#endif
1088 fd6ce8f6 bellard
1089 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
1090 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1091 53a5960a pbrook
                                 unsigned int n, target_ulong page_addr)
1092 fd6ce8f6 bellard
{
1093 fd6ce8f6 bellard
    PageDesc *p;
1094 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1095 9fa3e853 bellard
1096 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1097 3a7d929e bellard
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1098 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1099 9fa3e853 bellard
    last_first_tb = p->first_tb;
1100 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1101 9fa3e853 bellard
    invalidate_page_bitmap(p);
1102 fd6ce8f6 bellard
1103 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1104 d720b93d bellard
1105 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1106 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1107 53a5960a pbrook
        target_ulong addr;
1108 53a5960a pbrook
        PageDesc *p2;
1109 9fa3e853 bellard
        int prot;
1110 9fa3e853 bellard
1111 fd6ce8f6 bellard
        /* force the host page as non-writable (writes will have a
1112 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1113 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1114 fd6ce8f6 bellard
        prot = 0;
1115 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1116 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1117 53a5960a pbrook
1118 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1119 53a5960a pbrook
            if (!p2)
1120 53a5960a pbrook
                continue;
1121 53a5960a pbrook
            prot |= p2->flags;
1122 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1123 53a5960a pbrook
            page_get_flags(addr);
1124 53a5960a pbrook
        }
1125 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1126 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1127 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1128 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1129 53a5960a pbrook
               page_addr);
1130 fd6ce8f6 bellard
#endif
1131 fd6ce8f6 bellard
    }
1132 9fa3e853 bellard
#else
1133 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1134 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1135 9fa3e853 bellard
       allocated in a physical page */
1136 9fa3e853 bellard
    if (!last_first_tb) {
1137 6a00d601 bellard
        tlb_protect_code(page_addr);
1138 9fa3e853 bellard
    }
1139 9fa3e853 bellard
#endif
1140 d720b93d bellard
1141 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1142 fd6ce8f6 bellard
}
1143 fd6ce8f6 bellard
1144 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1145 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1146 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1147 fd6ce8f6 bellard
{
1148 fd6ce8f6 bellard
    TranslationBlock *tb;
1149 fd6ce8f6 bellard
1150 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1151 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1152 d4e8164f bellard
        return NULL;
1153 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1154 fd6ce8f6 bellard
    tb->pc = pc;
1155 b448f2f3 bellard
    tb->cflags = 0;
1156 d4e8164f bellard
    return tb;
1157 d4e8164f bellard
}
1158 d4e8164f bellard
1159 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
1160 2e70f6ef pbrook
{
1161 bf20dc07 ths
    /* In practice this is mostly used for single-use temporary TBs.
1162 2e70f6ef pbrook
       Ignore the hard cases and just back up if this TB happens to
1163 2e70f6ef pbrook
       be the last one generated.  */
1164 2e70f6ef pbrook
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1165 2e70f6ef pbrook
        code_gen_ptr = tb->tc_ptr;
1166 2e70f6ef pbrook
        nb_tbs--;
1167 2e70f6ef pbrook
    }
1168 2e70f6ef pbrook
}
1169 2e70f6ef pbrook
1170 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1171 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1172 5fafdf24 ths
void tb_link_phys(TranslationBlock *tb,
1173 9fa3e853 bellard
                  target_ulong phys_pc, target_ulong phys_page2)
1174 d4e8164f bellard
{
1175 9fa3e853 bellard
    unsigned int h;
1176 9fa3e853 bellard
    TranslationBlock **ptb;
1177 9fa3e853 bellard
1178 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1179 c8a706fe pbrook
       before we are done.  */
1180 c8a706fe pbrook
    mmap_lock();
1181 9fa3e853 bellard
    /* add in the physical hash table */
1182 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1183 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1184 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1185 9fa3e853 bellard
    *ptb = tb;
1186 fd6ce8f6 bellard
1187 fd6ce8f6 bellard
    /* add in the page list */
1188 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1189 9fa3e853 bellard
    if (phys_page2 != -1)
1190 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1191 9fa3e853 bellard
    else
1192 9fa3e853 bellard
        tb->page_addr[1] = -1;
1193 9fa3e853 bellard
1194 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1195 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1196 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1197 d4e8164f bellard
1198 d4e8164f bellard
    /* init original jump addresses */
1199 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1200 d4e8164f bellard
        tb_reset_jump(tb, 0);
1201 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1202 d4e8164f bellard
        tb_reset_jump(tb, 1);
1203 8a40a180 bellard
1204 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1205 8a40a180 bellard
    tb_page_check();
1206 8a40a180 bellard
#endif
1207 c8a706fe pbrook
    mmap_unlock();
1208 fd6ce8f6 bellard
}
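
/* Illustrative sketch, not part of the original file: how a caller such as
   tb_gen_code() might register a freshly translated block.  'phys_pc' and
   'phys_page2' are hypothetical values computed by the caller; phys_page2
   stays -1 when the generated code does not cross a guest page boundary. */
#if 0
static void example_register_tb(TranslationBlock *tb,
                                target_ulong phys_pc,
                                target_ulong phys_page2)
{
    /* hashes the TB by physical PC and hooks it into the per-page lists;
       tb_link_phys() takes and releases the mmap lock internally */
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif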
1209 fd6ce8f6 bellard
1210 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1211 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1212 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1213 fd6ce8f6 bellard
{
1214 9fa3e853 bellard
    int m_min, m_max, m;
1215 9fa3e853 bellard
    unsigned long v;
1216 9fa3e853 bellard
    TranslationBlock *tb;
1217 a513fe19 bellard
1218 a513fe19 bellard
    if (nb_tbs <= 0)
1219 a513fe19 bellard
        return NULL;
1220 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1221 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1222 a513fe19 bellard
        return NULL;
1223 a513fe19 bellard
    /* binary search (cf Knuth) */
1224 a513fe19 bellard
    m_min = 0;
1225 a513fe19 bellard
    m_max = nb_tbs - 1;
1226 a513fe19 bellard
    while (m_min <= m_max) {
1227 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1228 a513fe19 bellard
        tb = &tbs[m];
1229 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1230 a513fe19 bellard
        if (v == tc_ptr)
1231 a513fe19 bellard
            return tb;
1232 a513fe19 bellard
        else if (tc_ptr < v) {
1233 a513fe19 bellard
            m_max = m - 1;
1234 a513fe19 bellard
        } else {
1235 a513fe19 bellard
            m_min = m + 1;
1236 a513fe19 bellard
        }
1237 5fafdf24 ths
    }
1238 a513fe19 bellard
    return &tbs[m_max];
1239 a513fe19 bellard
}
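
/* Illustrative sketch, not part of the original file: mapping a host code
   pointer (for example one taken from a signal context while handling
   self-modifying code) back to the translation block containing it.
   'host_pc' is a hypothetical value. */
#if 0
static void example_lookup(unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* the binary search guarantees tb->tc_ptr <= host_pc and that
           host_pc lies below the start of the next block's code */
        printf("host pc %#lx was generated for guest pc " TARGET_FMT_lx "\n",
               host_pc, tb->pc);
    }
}
#endif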
1240 7501267e bellard
1241 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1242 ea041c0e bellard
1243 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1244 ea041c0e bellard
{
1245 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1246 ea041c0e bellard
    unsigned int n1;
1247 ea041c0e bellard
1248 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1249 ea041c0e bellard
    if (tb1 != NULL) {
1250 ea041c0e bellard
        /* find head of list */
1251 ea041c0e bellard
        for(;;) {
1252 ea041c0e bellard
            n1 = (long)tb1 & 3;
1253 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1254 ea041c0e bellard
            if (n1 == 2)
1255 ea041c0e bellard
                break;
1256 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1257 ea041c0e bellard
        }
1258 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1259 ea041c0e bellard
        tb_next = tb1;
1260 ea041c0e bellard
1261 ea041c0e bellard
        /* remove tb from the jmp_first list */
1262 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1263 ea041c0e bellard
        for(;;) {
1264 ea041c0e bellard
            tb1 = *ptb;
1265 ea041c0e bellard
            n1 = (long)tb1 & 3;
1266 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1267 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1268 ea041c0e bellard
                break;
1269 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1270 ea041c0e bellard
        }
1271 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1272 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1273 3b46e624 ths
1274 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1275 ea041c0e bellard
        tb_reset_jump(tb, n);
1276 ea041c0e bellard
1277 0124311e bellard
        /* suppress jumps in the tb to which we could have jumped */
1278 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1279 ea041c0e bellard
    }
1280 ea041c0e bellard
}
1281 ea041c0e bellard
1282 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1283 ea041c0e bellard
{
1284 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1285 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1286 ea041c0e bellard
}
1287 ea041c0e bellard
1288 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1289 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1290 d720b93d bellard
{
1291 9b3c35e0 j_mayer
    target_phys_addr_t addr;
1292 9b3c35e0 j_mayer
    target_ulong pd;
1293 c2f07f81 pbrook
    ram_addr_t ram_addr;
1294 c2f07f81 pbrook
    PhysPageDesc *p;
1295 d720b93d bellard
1296 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1297 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1298 c2f07f81 pbrook
    if (!p) {
1299 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1300 c2f07f81 pbrook
    } else {
1301 c2f07f81 pbrook
        pd = p->phys_offset;
1302 c2f07f81 pbrook
    }
1303 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1304 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1305 d720b93d bellard
}
1306 c27004ec bellard
#endif
1307 d720b93d bellard
1308 6658ffb8 pbrook
/* Add a watchpoint.  */
1309 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1310 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1311 6658ffb8 pbrook
{
1312 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1313 c0ce998e aliguori
    CPUWatchpoint *wp;
1314 6658ffb8 pbrook
1315 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1316 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1317 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1318 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1319 b4051334 aliguori
        return -EINVAL;
1320 b4051334 aliguori
    }
1321 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1322 a1d1bb31 aliguori
1323 a1d1bb31 aliguori
    wp->vaddr = addr;
1324 b4051334 aliguori
    wp->len_mask = len_mask;
1325 a1d1bb31 aliguori
    wp->flags = flags;
1326 a1d1bb31 aliguori
1327 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1328 c0ce998e aliguori
    if (flags & BP_GDB)
1329 c0ce998e aliguori
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1330 c0ce998e aliguori
    else
1331 c0ce998e aliguori
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1332 6658ffb8 pbrook
1333 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1334 a1d1bb31 aliguori
1335 a1d1bb31 aliguori
    if (watchpoint)
1336 a1d1bb31 aliguori
        *watchpoint = wp;
1337 a1d1bb31 aliguori
    return 0;
1338 6658ffb8 pbrook
}
1339 6658ffb8 pbrook
1340 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1341 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1342 a1d1bb31 aliguori
                          int flags)
1343 6658ffb8 pbrook
{
1344 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1345 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1346 6658ffb8 pbrook
1347 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1348 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1349 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1350 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1351 6658ffb8 pbrook
            return 0;
1352 6658ffb8 pbrook
        }
1353 6658ffb8 pbrook
    }
1354 a1d1bb31 aliguori
    return -ENOENT;
1355 6658ffb8 pbrook
}
1356 6658ffb8 pbrook
1357 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1358 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1359 a1d1bb31 aliguori
{
1360 c0ce998e aliguori
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1361 7d03f82f edgar_igl
1362 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1363 a1d1bb31 aliguori
1364 a1d1bb31 aliguori
    qemu_free(watchpoint);
1365 a1d1bb31 aliguori
}
1366 a1d1bb31 aliguori
1367 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1368 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1369 a1d1bb31 aliguori
{
1370 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1371 a1d1bb31 aliguori
1372 c0ce998e aliguori
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1373 a1d1bb31 aliguori
        if (wp->flags & mask)
1374 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1375 c0ce998e aliguori
    }
1376 7d03f82f edgar_igl
}
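
/* Illustrative sketch, not part of the original file: typical use of the
   watchpoint API (e.g. from the gdbstub).  The flags value is an assumption
   (in practice BP_GDB is usually combined with an access-type flag) and
   'guest_addr' is hypothetical; only the functions and BP_GDB come from
   this file. */
#if 0
static void example_watchpoint(CPUState *env, target_ulong guest_addr)
{
    CPUWatchpoint *wp;

    /* len must be a power of two (1, 2, 4 or 8) and guest_addr must be
       aligned to it, otherwise -EINVAL is returned */
    if (cpu_watchpoint_insert(env, guest_addr, 4, BP_GDB, &wp) < 0)
        return;

    /* ... run the guest; a hit sets BP_WATCHPOINT_HIT in wp->flags ... */

    cpu_watchpoint_remove_by_ref(env, wp);
    /* or drop every GDB-injected watchpoint at once:
       cpu_watchpoint_remove_all(env, BP_GDB); */
}
#endif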
1377 7d03f82f edgar_igl
1378 a1d1bb31 aliguori
/* Add a breakpoint.  */
1379 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1380 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1381 4c3a88a2 bellard
{
1382 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1383 c0ce998e aliguori
    CPUBreakpoint *bp;
1384 3b46e624 ths
1385 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1386 4c3a88a2 bellard
1387 a1d1bb31 aliguori
    bp->pc = pc;
1388 a1d1bb31 aliguori
    bp->flags = flags;
1389 a1d1bb31 aliguori
1390 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1391 c0ce998e aliguori
    if (flags & BP_GDB)
1392 c0ce998e aliguori
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1393 c0ce998e aliguori
    else
1394 c0ce998e aliguori
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1395 3b46e624 ths
1396 d720b93d bellard
    breakpoint_invalidate(env, pc);
1397 a1d1bb31 aliguori
1398 a1d1bb31 aliguori
    if (breakpoint)
1399 a1d1bb31 aliguori
        *breakpoint = bp;
1400 4c3a88a2 bellard
    return 0;
1401 4c3a88a2 bellard
#else
1402 a1d1bb31 aliguori
    return -ENOSYS;
1403 4c3a88a2 bellard
#endif
1404 4c3a88a2 bellard
}
1405 4c3a88a2 bellard
1406 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1407 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1408 a1d1bb31 aliguori
{
1409 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1410 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1411 a1d1bb31 aliguori
1412 c0ce998e aliguori
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1413 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1414 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1415 a1d1bb31 aliguori
            return 0;
1416 a1d1bb31 aliguori
        }
1417 7d03f82f edgar_igl
    }
1418 a1d1bb31 aliguori
    return -ENOENT;
1419 a1d1bb31 aliguori
#else
1420 a1d1bb31 aliguori
    return -ENOSYS;
1421 7d03f82f edgar_igl
#endif
1422 7d03f82f edgar_igl
}
1423 7d03f82f edgar_igl
1424 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1425 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1426 4c3a88a2 bellard
{
1427 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1428 c0ce998e aliguori
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1429 d720b93d bellard
1430 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1431 a1d1bb31 aliguori
1432 a1d1bb31 aliguori
    qemu_free(breakpoint);
1433 a1d1bb31 aliguori
#endif
1434 a1d1bb31 aliguori
}
1435 a1d1bb31 aliguori
1436 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1437 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1438 a1d1bb31 aliguori
{
1439 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1440 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1441 a1d1bb31 aliguori
1442 c0ce998e aliguori
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1443 a1d1bb31 aliguori
        if (bp->flags & mask)
1444 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1445 c0ce998e aliguori
    }
1446 4c3a88a2 bellard
#endif
1447 4c3a88a2 bellard
}
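
/* Illustrative sketch, not part of the original file: the breakpoint API
   mirrors the watchpoint one.  'guest_pc' is a hypothetical address. */
#if 0
static void example_breakpoint(CPUState *env, target_ulong guest_pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, guest_pc, BP_GDB, &bp) == 0) {
        /* breakpoint_invalidate() has already flushed the affected TB */
        cpu_breakpoint_remove(env, guest_pc, BP_GDB);
    }
}
#endif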
1448 4c3a88a2 bellard
1449 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1450 c33a346e bellard
   CPU loop after each instruction */
1451 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1452 c33a346e bellard
{
1453 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1454 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1455 c33a346e bellard
        env->singlestep_enabled = enabled;
1456 e22a25c9 aliguori
        if (kvm_enabled())
1457 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1458 e22a25c9 aliguori
        else {
1459 e22a25c9 aliguori
            /* must flush all the translated code to avoid inconsistencies */
1460 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1461 e22a25c9 aliguori
            tb_flush(env);
1462 e22a25c9 aliguori
        }
1463 c33a346e bellard
    }
1464 c33a346e bellard
#endif
1465 c33a346e bellard
}
1466 c33a346e bellard
1467 34865134 bellard
/* enable or disable low levels log */
1468 34865134 bellard
void cpu_set_log(int log_flags)
1469 34865134 bellard
{
1470 34865134 bellard
    loglevel = log_flags;
1471 34865134 bellard
    if (loglevel && !logfile) {
1472 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1473 34865134 bellard
        if (!logfile) {
1474 34865134 bellard
            perror(logfilename);
1475 34865134 bellard
            _exit(1);
1476 34865134 bellard
        }
1477 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1478 9fa3e853 bellard
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1479 9fa3e853 bellard
        {
1480 b55266b5 blueswir1
            static char logfile_buf[4096];
1481 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1482 9fa3e853 bellard
        }
1483 9fa3e853 bellard
#else
1484 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1485 9fa3e853 bellard
#endif
1486 e735b91c pbrook
        log_append = 1;
1487 e735b91c pbrook
    }
1488 e735b91c pbrook
    if (!loglevel && logfile) {
1489 e735b91c pbrook
        fclose(logfile);
1490 e735b91c pbrook
        logfile = NULL;
1491 34865134 bellard
    }
1492 34865134 bellard
}
1493 34865134 bellard
1494 34865134 bellard
void cpu_set_log_filename(const char *filename)
1495 34865134 bellard
{
1496 34865134 bellard
    logfilename = strdup(filename);
1497 e735b91c pbrook
    if (logfile) {
1498 e735b91c pbrook
        fclose(logfile);
1499 e735b91c pbrook
        logfile = NULL;
1500 e735b91c pbrook
    }
1501 e735b91c pbrook
    cpu_set_log(loglevel);
1502 34865134 bellard
}
1503 c33a346e bellard
1504 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1505 ea041c0e bellard
{
1506 3098dba0 aurel32
#if defined(USE_NPTL)
1507 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1508 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1509 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds.  Often
1510 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
1511 3098dba0 aurel32
#else
1512 ea041c0e bellard
    TranslationBlock *tb;
1513 15a51156 aurel32
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1514 59817ccb bellard
1515 3098dba0 aurel32
    tb = env->current_tb;
1516 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1517 3098dba0 aurel32
       all the potentially executing TB */
1518 3098dba0 aurel32
    if (tb && !testandset(&interrupt_lock)) {
1519 3098dba0 aurel32
        env->current_tb = NULL;
1520 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1521 3098dba0 aurel32
        resetlock(&interrupt_lock);
1522 be214e6c aurel32
    }
1523 3098dba0 aurel32
#endif
1524 3098dba0 aurel32
}
1525 3098dba0 aurel32
1526 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1527 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1528 3098dba0 aurel32
{
1529 3098dba0 aurel32
    int old_mask;
1530 be214e6c aurel32
1531 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1532 68a79315 bellard
    env->interrupt_request |= mask;
1533 3098dba0 aurel32
1534 2e70f6ef pbrook
    if (use_icount) {
1535 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1536 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1537 2e70f6ef pbrook
        if (!can_do_io(env)
1538 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1539 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1540 2e70f6ef pbrook
        }
1541 2e70f6ef pbrook
#endif
1542 2e70f6ef pbrook
    } else {
1543 3098dba0 aurel32
        cpu_unlink_tb(env);
1544 ea041c0e bellard
    }
1545 ea041c0e bellard
}
1546 ea041c0e bellard
1547 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1548 b54ad049 bellard
{
1549 b54ad049 bellard
    env->interrupt_request &= ~mask;
1550 b54ad049 bellard
}
1551 b54ad049 bellard
1552 3098dba0 aurel32
void cpu_exit(CPUState *env)
1553 3098dba0 aurel32
{
1554 3098dba0 aurel32
    env->exit_request = 1;
1555 3098dba0 aurel32
    cpu_unlink_tb(env);
1556 3098dba0 aurel32
}
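
/* Illustrative sketch, not part of the original file: how device emulation
   typically raises and clears a CPU interrupt.  CPU_INTERRUPT_HARD (the
   usual mask for external hardware interrupts) and the surrounding logic
   are assumptions; only cpu_interrupt()/cpu_reset_interrupt() come from
   this file. */
#if 0
static void example_set_irq(CPUState *env, int level)
{
    if (level)
        /* unlinks the current TB so the execution loop notices the request */
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    else
        cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
}
#endif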
1557 3098dba0 aurel32
1558 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1559 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1560 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1561 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1562 f193c797 bellard
      "show target assembly code for each compiled TB" },
1563 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1564 57fec1fe bellard
      "show micro ops for each compiled TB" },
1565 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1566 e01a1157 blueswir1
      "show micro ops "
1567 e01a1157 blueswir1
#ifdef TARGET_I386
1568 e01a1157 blueswir1
      "before eflags optimization and "
1569 f193c797 bellard
#endif
1570 e01a1157 blueswir1
      "after liveness analysis" },
1571 f193c797 bellard
    { CPU_LOG_INT, "int",
1572 f193c797 bellard
      "show interrupts/exceptions in short format" },
1573 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1574 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1575 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1576 e91c8a77 ths
      "show CPU state before block translation" },
1577 f193c797 bellard
#ifdef TARGET_I386
1578 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1579 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1580 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1581 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1582 f193c797 bellard
#endif
1583 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1584 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1585 fd872598 bellard
      "show all i/o ports accesses" },
1586 8e3a9fd2 bellard
#endif
1587 f193c797 bellard
    { 0, NULL, NULL },
1588 f193c797 bellard
};
1589 f193c797 bellard
1590 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1591 f193c797 bellard
{
1592 f193c797 bellard
    if (strlen(s2) != n)
1593 f193c797 bellard
        return 0;
1594 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1595 f193c797 bellard
}
1596 3b46e624 ths
1597 f193c797 bellard
/* takes a comma-separated list of log masks. Return 0 if error. */
1598 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1599 f193c797 bellard
{
1600 c7cd6a37 blueswir1
    const CPULogItem *item;
1601 f193c797 bellard
    int mask;
1602 f193c797 bellard
    const char *p, *p1;
1603 f193c797 bellard
1604 f193c797 bellard
    p = str;
1605 f193c797 bellard
    mask = 0;
1606 f193c797 bellard
    for(;;) {
1607 f193c797 bellard
        p1 = strchr(p, ',');
1608 f193c797 bellard
        if (!p1)
1609 f193c797 bellard
            p1 = p + strlen(p);
1610 8e3a9fd2 bellard
        if (cmp1(p, p1 - p, "all")) {
1611 8e3a9fd2 bellard
            for(item = cpu_log_items; item->mask != 0; item++) {
1612 8e3a9fd2 bellard
                mask |= item->mask;
1613 8e3a9fd2 bellard
            }
1614 8e3a9fd2 bellard
        } else {
1615 f193c797 bellard
            for(item = cpu_log_items; item->mask != 0; item++) {
1616 f193c797 bellard
                if (cmp1(p, p1 - p, item->name))
1617 f193c797 bellard
                    goto found;
1618 f193c797 bellard
            }
1619 f193c797 bellard
            return 0;
1620 8e3a9fd2 bellard
        }
1621 f193c797 bellard
    found:
1622 f193c797 bellard
        mask |= item->mask;
1623 f193c797 bellard
        if (*p1 != ',')
1624 f193c797 bellard
            break;
1625 f193c797 bellard
        p = p1 + 1;
1626 f193c797 bellard
    }
1627 f193c797 bellard
    return mask;
1628 f193c797 bellard
}
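
/* Illustrative sketch, not part of the original file: how a "-d" style
   option could be turned into log flags.  For example "in_asm,cpu" maps to
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU; an unknown item name (other than the
   special "all") makes cpu_str_to_log_mask() return 0. */
#if 0
static void example_enable_logging(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);    /* e.g. arg = "in_asm,cpu" */
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}
#endif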
1629 ea041c0e bellard
1630 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1631 7501267e bellard
{
1632 7501267e bellard
    va_list ap;
1633 493ae1f0 pbrook
    va_list ap2;
1634 7501267e bellard
1635 7501267e bellard
    va_start(ap, fmt);
1636 493ae1f0 pbrook
    va_copy(ap2, ap);
1637 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1638 7501267e bellard
    vfprintf(stderr, fmt, ap);
1639 7501267e bellard
    fprintf(stderr, "\n");
1640 7501267e bellard
#ifdef TARGET_I386
1641 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1642 7fe48483 bellard
#else
1643 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1644 7501267e bellard
#endif
1645 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1646 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1647 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1648 93fcfe39 aliguori
        qemu_log("\n");
1649 f9373291 j_mayer
#ifdef TARGET_I386
1650 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1651 f9373291 j_mayer
#else
1652 93fcfe39 aliguori
        log_cpu_state(env, 0);
1653 f9373291 j_mayer
#endif
1654 31b1a7b4 aliguori
        qemu_log_flush();
1655 93fcfe39 aliguori
        qemu_log_close();
1656 924edcae balrog
    }
1657 493ae1f0 pbrook
    va_end(ap2);
1658 f9373291 j_mayer
    va_end(ap);
1659 7501267e bellard
    abort();
1660 7501267e bellard
}
1661 7501267e bellard
1662 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1663 c5be9f08 ths
{
1664 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1665 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1666 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1667 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1668 5a38f081 aliguori
    CPUBreakpoint *bp;
1669 5a38f081 aliguori
    CPUWatchpoint *wp;
1670 5a38f081 aliguori
#endif
1671 5a38f081 aliguori
1672 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1673 5a38f081 aliguori
1674 5a38f081 aliguori
    /* Preserve chaining and index. */
1675 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1676 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1677 5a38f081 aliguori
1678 5a38f081 aliguori
    /* Clone all break/watchpoints.
1679 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1680 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1681 5a38f081 aliguori
    TAILQ_INIT(&env->breakpoints);
1682 5a38f081 aliguori
    TAILQ_INIT(&env->watchpoints);
1683 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1684 5a38f081 aliguori
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1685 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1686 5a38f081 aliguori
    }
1687 5a38f081 aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1688 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1689 5a38f081 aliguori
                              wp->flags, NULL);
1690 5a38f081 aliguori
    }
1691 5a38f081 aliguori
#endif
1692 5a38f081 aliguori
1693 c5be9f08 ths
    return new_env;
1694 c5be9f08 ths
}
1695 c5be9f08 ths
1696 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1697 0124311e bellard
1698 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1699 5c751e99 edgar_igl
{
1700 5c751e99 edgar_igl
    unsigned int i;
1701 5c751e99 edgar_igl
1702 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1703 5c751e99 edgar_igl
       overlap the flushed page.  */
1704 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1705 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1706 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1707 5c751e99 edgar_igl
1708 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1709 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1710 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1711 5c751e99 edgar_igl
}
1712 5c751e99 edgar_igl
1713 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1714 ee8b7021 bellard
   implemented yet) */
1715 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1716 33417e70 bellard
{
1717 33417e70 bellard
    int i;
1718 0124311e bellard
1719 9fa3e853 bellard
#if defined(DEBUG_TLB)
1720 9fa3e853 bellard
    printf("tlb_flush:\n");
1721 9fa3e853 bellard
#endif
1722 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1723 0124311e bellard
       links while we are modifying them */
1724 0124311e bellard
    env->current_tb = NULL;
1725 0124311e bellard
1726 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1727 84b7b8e7 bellard
        env->tlb_table[0][i].addr_read = -1;
1728 84b7b8e7 bellard
        env->tlb_table[0][i].addr_write = -1;
1729 84b7b8e7 bellard
        env->tlb_table[0][i].addr_code = -1;
1730 84b7b8e7 bellard
        env->tlb_table[1][i].addr_read = -1;
1731 84b7b8e7 bellard
        env->tlb_table[1][i].addr_write = -1;
1732 84b7b8e7 bellard
        env->tlb_table[1][i].addr_code = -1;
1733 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1734 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_read = -1;
1735 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_write = -1;
1736 6fa4cea9 j_mayer
        env->tlb_table[2][i].addr_code = -1;
1737 e37e6ee6 aurel32
#endif
1738 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1739 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_read = -1;
1740 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_write = -1;
1741 6fa4cea9 j_mayer
        env->tlb_table[3][i].addr_code = -1;
1742 6fa4cea9 j_mayer
#endif
1743 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1744 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_read = -1;
1745 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_write = -1;
1746 e37e6ee6 aurel32
        env->tlb_table[4][i].addr_code = -1;
1747 6fa4cea9 j_mayer
#endif
1748 e37e6ee6 aurel32
1749 33417e70 bellard
    }
1750 9fa3e853 bellard
1751 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1752 9fa3e853 bellard
1753 0a962c02 bellard
#ifdef USE_KQEMU
1754 0a962c02 bellard
    if (env->kqemu_enabled) {
1755 0a962c02 bellard
        kqemu_flush(env, flush_global);
1756 0a962c02 bellard
    }
1757 0a962c02 bellard
#endif
1758 e3db7226 bellard
    tlb_flush_count++;
1759 33417e70 bellard
}
1760 33417e70 bellard
1761 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1762 61382a50 bellard
{
1763 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1764 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1765 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1766 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1767 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1768 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1769 84b7b8e7 bellard
        tlb_entry->addr_read = -1;
1770 84b7b8e7 bellard
        tlb_entry->addr_write = -1;
1771 84b7b8e7 bellard
        tlb_entry->addr_code = -1;
1772 84b7b8e7 bellard
    }
1773 61382a50 bellard
}
1774 61382a50 bellard
1775 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
1776 33417e70 bellard
{
1777 8a40a180 bellard
    int i;
1778 0124311e bellard
1779 9fa3e853 bellard
#if defined(DEBUG_TLB)
1780 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1781 9fa3e853 bellard
#endif
1782 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1783 0124311e bellard
       links while we are modifying them */
1784 0124311e bellard
    env->current_tb = NULL;
1785 61382a50 bellard
1786 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
1787 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1788 84b7b8e7 bellard
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1789 84b7b8e7 bellard
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1790 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1791 6fa4cea9 j_mayer
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1792 e37e6ee6 aurel32
#endif
1793 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1794 6fa4cea9 j_mayer
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1795 6fa4cea9 j_mayer
#endif
1796 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1797 e37e6ee6 aurel32
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1798 6fa4cea9 j_mayer
#endif
1799 0124311e bellard
1800 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
1801 9fa3e853 bellard
1802 0a962c02 bellard
#ifdef USE_KQEMU
1803 0a962c02 bellard
    if (env->kqemu_enabled) {
1804 0a962c02 bellard
        kqemu_flush_page(env, addr);
1805 0a962c02 bellard
    }
1806 0a962c02 bellard
#endif
1807 9fa3e853 bellard
}
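
/* Illustrative sketch, not part of the original file: when a target's MMU
   emulation changes or removes a single guest mapping it must drop the
   cached softmmu entry for that page, while a full address-space switch
   usually flushes everything.  The helper and its arguments are
   hypothetical. */
#if 0
static void example_mmu_update(CPUState *env, target_ulong vaddr,
                               int whole_address_space)
{
    if (whole_address_space)
        tlb_flush(env, 1);            /* flush_global is not honoured yet */
    else
        tlb_flush_page(env, vaddr);   /* single page only */
}
#endif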
1808 9fa3e853 bellard
1809 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1810 9fa3e853 bellard
   can be detected */
1811 6a00d601 bellard
static void tlb_protect_code(ram_addr_t ram_addr)
1812 9fa3e853 bellard
{
1813 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1814 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1815 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1816 9fa3e853 bellard
}
1817 9fa3e853 bellard
1818 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1819 3a7d929e bellard
   tested for self modifying code */
1820 5fafdf24 ths
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1821 3a7d929e bellard
                                    target_ulong vaddr)
1822 9fa3e853 bellard
{
1823 3a7d929e bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1824 1ccde1cb bellard
}
1825 1ccde1cb bellard
1826 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1827 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
1828 1ccde1cb bellard
{
1829 1ccde1cb bellard
    unsigned long addr;
1830 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1831 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1832 1ccde1cb bellard
        if ((addr - start) < length) {
1833 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1834 1ccde1cb bellard
        }
1835 1ccde1cb bellard
    }
1836 1ccde1cb bellard
}
1837 1ccde1cb bellard
1838 3a7d929e bellard
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1839 0a962c02 bellard
                                     int dirty_flags)
1840 1ccde1cb bellard
{
1841 1ccde1cb bellard
    CPUState *env;
1842 4f2ac237 bellard
    unsigned long length, start1;
1843 0a962c02 bellard
    int i, mask, len;
1844 0a962c02 bellard
    uint8_t *p;
1845 1ccde1cb bellard
1846 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
1847 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
1848 1ccde1cb bellard
1849 1ccde1cb bellard
    length = end - start;
1850 1ccde1cb bellard
    if (length == 0)
1851 1ccde1cb bellard
        return;
1852 0a962c02 bellard
    len = length >> TARGET_PAGE_BITS;
1853 3a7d929e bellard
#ifdef USE_KQEMU
1854 6a00d601 bellard
    /* XXX: should not depend on cpu context */
1855 6a00d601 bellard
    env = first_cpu;
1856 3a7d929e bellard
    if (env->kqemu_enabled) {
1857 f23db169 bellard
        ram_addr_t addr;
1858 f23db169 bellard
        addr = start;
1859 f23db169 bellard
        for(i = 0; i < len; i++) {
1860 f23db169 bellard
            kqemu_set_notdirty(env, addr);
1861 f23db169 bellard
            addr += TARGET_PAGE_SIZE;
1862 f23db169 bellard
        }
1863 3a7d929e bellard
    }
1864 3a7d929e bellard
#endif
1865 f23db169 bellard
    mask = ~dirty_flags;
1866 f23db169 bellard
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1867 f23db169 bellard
    for(i = 0; i < len; i++)
1868 f23db169 bellard
        p[i] &= mask;
1869 f23db169 bellard
1870 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
1871 1ccde1cb bellard
       when accessing the range */
1872 59817ccb bellard
    start1 = start + (unsigned long)phys_ram_base;
1873 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1874 6a00d601 bellard
        for(i = 0; i < CPU_TLB_SIZE; i++)
1875 84b7b8e7 bellard
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1876 6a00d601 bellard
        for(i = 0; i < CPU_TLB_SIZE; i++)
1877 84b7b8e7 bellard
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1878 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1879 6fa4cea9 j_mayer
        for(i = 0; i < CPU_TLB_SIZE; i++)
1880 6fa4cea9 j_mayer
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1881 e37e6ee6 aurel32
#endif
1882 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1883 6fa4cea9 j_mayer
        for(i = 0; i < CPU_TLB_SIZE; i++)
1884 6fa4cea9 j_mayer
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1885 6fa4cea9 j_mayer
#endif
1886 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1887 e37e6ee6 aurel32
        for(i = 0; i < CPU_TLB_SIZE; i++)
1888 e37e6ee6 aurel32
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1889 6fa4cea9 j_mayer
#endif
1890 6a00d601 bellard
    }
1891 1ccde1cb bellard
}
1892 1ccde1cb bellard
1893 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
1894 74576198 aliguori
{
1895 74576198 aliguori
    in_migration = enable;
1896 74576198 aliguori
    return 0;
1897 74576198 aliguori
}
1898 74576198 aliguori
1899 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
1900 74576198 aliguori
{
1901 74576198 aliguori
    return in_migration;
1902 74576198 aliguori
}
1903 74576198 aliguori
1904 2bec46dc aliguori
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1905 2bec46dc aliguori
{
1906 2bec46dc aliguori
    if (kvm_enabled())
1907 2bec46dc aliguori
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1908 2bec46dc aliguori
}
1909 2bec46dc aliguori
1910 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1911 3a7d929e bellard
{
1912 3a7d929e bellard
    ram_addr_t ram_addr;
1913 3a7d929e bellard
1914 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1915 5fafdf24 ths
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1916 3a7d929e bellard
            tlb_entry->addend - (unsigned long)phys_ram_base;
1917 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1918 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1919 3a7d929e bellard
        }
1920 3a7d929e bellard
    }
1921 3a7d929e bellard
}
1922 3a7d929e bellard
1923 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
1924 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
1925 3a7d929e bellard
{
1926 3a7d929e bellard
    int i;
1927 3a7d929e bellard
    for(i = 0; i < CPU_TLB_SIZE; i++)
1928 84b7b8e7 bellard
        tlb_update_dirty(&env->tlb_table[0][i]);
1929 3a7d929e bellard
    for(i = 0; i < CPU_TLB_SIZE; i++)
1930 84b7b8e7 bellard
        tlb_update_dirty(&env->tlb_table[1][i]);
1931 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1932 6fa4cea9 j_mayer
    for(i = 0; i < CPU_TLB_SIZE; i++)
1933 6fa4cea9 j_mayer
        tlb_update_dirty(&env->tlb_table[2][i]);
1934 e37e6ee6 aurel32
#endif
1935 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1936 6fa4cea9 j_mayer
    for(i = 0; i < CPU_TLB_SIZE; i++)
1937 6fa4cea9 j_mayer
        tlb_update_dirty(&env->tlb_table[3][i]);
1938 6fa4cea9 j_mayer
#endif
1939 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1940 e37e6ee6 aurel32
    for(i = 0; i < CPU_TLB_SIZE; i++)
1941 e37e6ee6 aurel32
        tlb_update_dirty(&env->tlb_table[4][i]);
1942 6fa4cea9 j_mayer
#endif
1943 3a7d929e bellard
}
1944 3a7d929e bellard
1945 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1946 1ccde1cb bellard
{
1947 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1948 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
1949 1ccde1cb bellard
}
1950 1ccde1cb bellard
1951 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
1952 0f459d16 pbrook
   so that it is no longer dirty */
1953 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1954 1ccde1cb bellard
{
1955 1ccde1cb bellard
    int i;
1956 1ccde1cb bellard
1957 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
1958 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1959 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1960 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1961 6fa4cea9 j_mayer
#if (NB_MMU_MODES >= 3)
1962 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1963 e37e6ee6 aurel32
#endif
1964 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 4)
1965 0f459d16 pbrook
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1966 6fa4cea9 j_mayer
#endif
1967 e37e6ee6 aurel32
#if (NB_MMU_MODES >= 5)
1968 e37e6ee6 aurel32
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
1969 6fa4cea9 j_mayer
#endif
1970 9fa3e853 bellard
}
1971 9fa3e853 bellard
1972 59817ccb bellard
/* add a new TLB entry. At most one entry for a given virtual address
1973 59817ccb bellard
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1974 59817ccb bellard
   (can only happen in non SOFTMMU mode for I/O pages or pages
1975 59817ccb bellard
   conflicting with the host address space). */
1976 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1977 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
1978 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
1979 9fa3e853 bellard
{
1980 92e873b9 bellard
    PhysPageDesc *p;
1981 4f2ac237 bellard
    unsigned long pd;
1982 9fa3e853 bellard
    unsigned int index;
1983 4f2ac237 bellard
    target_ulong address;
1984 0f459d16 pbrook
    target_ulong code_address;
1985 108c49b8 bellard
    target_phys_addr_t addend;
1986 9fa3e853 bellard
    int ret;
1987 84b7b8e7 bellard
    CPUTLBEntry *te;
1988 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1989 0f459d16 pbrook
    target_phys_addr_t iotlb;
1990 9fa3e853 bellard
1991 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1992 9fa3e853 bellard
    if (!p) {
1993 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
1994 9fa3e853 bellard
    } else {
1995 9fa3e853 bellard
        pd = p->phys_offset;
1996 9fa3e853 bellard
    }
1997 9fa3e853 bellard
#if defined(DEBUG_TLB)
1998 6ebbf390 j_mayer
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1999 6ebbf390 j_mayer
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2000 9fa3e853 bellard
#endif
2001 9fa3e853 bellard
2002 9fa3e853 bellard
    ret = 0;
2003 0f459d16 pbrook
    address = vaddr;
2004 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2005 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2006 0f459d16 pbrook
        address |= TLB_MMIO;
2007 0f459d16 pbrook
    }
2008 0f459d16 pbrook
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2009 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2010 0f459d16 pbrook
        /* Normal RAM.  */
2011 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2012 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2013 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
2014 0f459d16 pbrook
        else
2015 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
2016 0f459d16 pbrook
    } else {
2017 0f459d16 pbrook
        /* IO handlers are currently passed a physical address.
2018 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2019 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2020 0f459d16 pbrook
           and avoid full address decoding in every device.
2021 0f459d16 pbrook
           We can't use the high bits of pd for this because
2022 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2023 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2024 8da3ff18 pbrook
        if (p) {
2025 8da3ff18 pbrook
            iotlb += p->region_offset;
2026 8da3ff18 pbrook
        } else {
2027 8da3ff18 pbrook
            iotlb += paddr;
2028 8da3ff18 pbrook
        }
2029 0f459d16 pbrook
    }
2030 0f459d16 pbrook
2031 0f459d16 pbrook
    code_address = address;
2032 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2033 0f459d16 pbrook
       watchpoint trap routines.  */
2034 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2035 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2036 0f459d16 pbrook
            iotlb = io_mem_watch + paddr;
2037 0f459d16 pbrook
            /* TODO: The memory case can be optimized by not trapping
2038 0f459d16 pbrook
               reads of pages with a write breakpoint.  */
2039 0f459d16 pbrook
            address |= TLB_MMIO;
2040 6658ffb8 pbrook
        }
2041 0f459d16 pbrook
    }
2042 d79acba4 balrog
2043 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2044 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2045 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2046 0f459d16 pbrook
    te->addend = addend - vaddr;
2047 0f459d16 pbrook
    if (prot & PAGE_READ) {
2048 0f459d16 pbrook
        te->addr_read = address;
2049 0f459d16 pbrook
    } else {
2050 0f459d16 pbrook
        te->addr_read = -1;
2051 0f459d16 pbrook
    }
2052 5c751e99 edgar_igl
2053 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2054 0f459d16 pbrook
        te->addr_code = code_address;
2055 0f459d16 pbrook
    } else {
2056 0f459d16 pbrook
        te->addr_code = -1;
2057 0f459d16 pbrook
    }
2058 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2059 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2060 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2061 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2062 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2063 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2064 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2065 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2066 9fa3e853 bellard
        } else {
2067 0f459d16 pbrook
            te->addr_write = address;
2068 9fa3e853 bellard
        }
2069 0f459d16 pbrook
    } else {
2070 0f459d16 pbrook
        te->addr_write = -1;
2071 9fa3e853 bellard
    }
2072 9fa3e853 bellard
    return ret;
2073 9fa3e853 bellard
}
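
/* Illustrative sketch, not part of the original file: a target's tlb_fill()
   handler would, after walking its page tables, install the resulting
   mapping roughly like this.  'paddr', 'prot' and 'mmu_idx' are hypothetical
   results of that walk. */
#if 0
static void example_tlb_fill(CPUState *env, target_ulong vaddr,
                             target_phys_addr_t paddr, int prot, int mmu_idx)
{
    /* one call per page; the new entry replaces whatever was cached at the
       same TLB index for this mmu_idx */
    tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                      paddr & TARGET_PAGE_MASK, prot, mmu_idx, 1);
}
#endif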
2074 9fa3e853 bellard
2075 0124311e bellard
#else
2076 0124311e bellard
2077 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2078 0124311e bellard
{
2079 0124311e bellard
}
2080 0124311e bellard
2081 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2082 0124311e bellard
{
2083 0124311e bellard
}
2084 0124311e bellard
2085 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2086 5fafdf24 ths
                      target_phys_addr_t paddr, int prot,
2087 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2088 9fa3e853 bellard
{
2089 9fa3e853 bellard
    return 0;
2090 9fa3e853 bellard
}
2091 0124311e bellard
2092 9fa3e853 bellard
/* dump memory mappings */
2093 9fa3e853 bellard
void page_dump(FILE *f)
2094 33417e70 bellard
{
2095 9fa3e853 bellard
    unsigned long start, end;
2096 9fa3e853 bellard
    int i, j, prot, prot1;
2097 9fa3e853 bellard
    PageDesc *p;
2098 33417e70 bellard
2099 9fa3e853 bellard
    fprintf(f, "%-8s %-8s %-8s %s\n",
2100 9fa3e853 bellard
            "start", "end", "size", "prot");
2101 9fa3e853 bellard
    start = -1;
2102 9fa3e853 bellard
    end = -1;
2103 9fa3e853 bellard
    prot = 0;
2104 9fa3e853 bellard
    for(i = 0; i <= L1_SIZE; i++) {
2105 9fa3e853 bellard
        if (i < L1_SIZE)
2106 9fa3e853 bellard
            p = l1_map[i];
2107 9fa3e853 bellard
        else
2108 9fa3e853 bellard
            p = NULL;
2109 9fa3e853 bellard
        for(j = 0;j < L2_SIZE; j++) {
2110 9fa3e853 bellard
            if (!p)
2111 9fa3e853 bellard
                prot1 = 0;
2112 9fa3e853 bellard
            else
2113 9fa3e853 bellard
                prot1 = p[j].flags;
2114 9fa3e853 bellard
            if (prot1 != prot) {
2115 9fa3e853 bellard
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2116 9fa3e853 bellard
                if (start != -1) {
2117 9fa3e853 bellard
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2118 5fafdf24 ths
                            start, end, end - start,
2119 9fa3e853 bellard
                            prot & PAGE_READ ? 'r' : '-',
2120 9fa3e853 bellard
                            prot & PAGE_WRITE ? 'w' : '-',
2121 9fa3e853 bellard
                            prot & PAGE_EXEC ? 'x' : '-');
2122 9fa3e853 bellard
                }
2123 9fa3e853 bellard
                if (prot1 != 0)
2124 9fa3e853 bellard
                    start = end;
2125 9fa3e853 bellard
                else
2126 9fa3e853 bellard
                    start = -1;
2127 9fa3e853 bellard
                prot = prot1;
2128 9fa3e853 bellard
            }
2129 9fa3e853 bellard
            if (!p)
2130 9fa3e853 bellard
                break;
2131 9fa3e853 bellard
        }
2132 33417e70 bellard
    }
2133 33417e70 bellard
}
2134 33417e70 bellard
2135 53a5960a pbrook
int page_get_flags(target_ulong address)
2136 33417e70 bellard
{
2137 9fa3e853 bellard
    PageDesc *p;
2138 9fa3e853 bellard
2139 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2140 33417e70 bellard
    if (!p)
2141 9fa3e853 bellard
        return 0;
2142 9fa3e853 bellard
    return p->flags;
2143 9fa3e853 bellard
}
2144 9fa3e853 bellard
2145 9fa3e853 bellard
/* modify the flags of a page and invalidate the code if
2146 9fa3e853 bellard
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2147 9fa3e853 bellard
   depending on PAGE_WRITE */
2148 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2149 9fa3e853 bellard
{
2150 9fa3e853 bellard
    PageDesc *p;
2151 53a5960a pbrook
    target_ulong addr;
2152 9fa3e853 bellard
2153 c8a706fe pbrook
    /* mmap_lock should already be held.  */
2154 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2155 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2156 9fa3e853 bellard
    if (flags & PAGE_WRITE)
2157 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2158 9fa3e853 bellard
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2159 9fa3e853 bellard
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2160 17e2377a pbrook
        /* We may be called for host regions that are outside guest
2161 17e2377a pbrook
           address space.  */
2162 17e2377a pbrook
        if (!p)
2163 17e2377a pbrook
            return;
2164 9fa3e853 bellard
        /* if the write protection is set, then we invalidate the code
2165 9fa3e853 bellard
           inside */
2166 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2167 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2168 9fa3e853 bellard
            p->first_tb) {
2169 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2170 9fa3e853 bellard
        }
2171 9fa3e853 bellard
        p->flags = flags;
2172 9fa3e853 bellard
    }
2173 33417e70 bellard
}
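
/* Illustrative sketch, not part of the original file: user-mode code such as
   a target_mmap() implementation records new guest mappings so that the
   code-protection logic above can track them.  The helper and its arguments
   are hypothetical. */
#if 0
static void example_note_mapping(target_ulong start, target_ulong len)
{
    /* must be called with the mmap lock held; PAGE_WRITE_ORG is added
       automatically because PAGE_WRITE is set */
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
}
#endif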
2174 33417e70 bellard
2175 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2176 3d97b40b ths
{
2177 3d97b40b ths
    PageDesc *p;
2178 3d97b40b ths
    target_ulong end;
2179 3d97b40b ths
    target_ulong addr;
2180 3d97b40b ths
2181 55f280c9 balrog
    if (start + len < start)
2182 55f280c9 balrog
        /* we've wrapped around */
2183 55f280c9 balrog
        return -1;
2184 55f280c9 balrog
2185 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2186 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2187 3d97b40b ths
2188 3d97b40b ths
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2189 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2190 3d97b40b ths
        if( !p )
2191 3d97b40b ths
            return -1;
2192 3d97b40b ths
        if( !(p->flags & PAGE_VALID) )
2193 3d97b40b ths
            return -1;
2194 3d97b40b ths
2195 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2196 3d97b40b ths
            return -1;
2197 dae3270c bellard
        if (flags & PAGE_WRITE) {
2198 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2199 dae3270c bellard
                return -1;
2200 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2201 dae3270c bellard
               contains translated code */
2202 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2203 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2204 dae3270c bellard
                    return -1;
2205 dae3270c bellard
            }
2206 dae3270c bellard
            return 0;
2207 dae3270c bellard
        }
2208 3d97b40b ths
    }
2209 3d97b40b ths
    return 0;
2210 3d97b40b ths
}
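
/* Illustrative sketch, not part of the original file: validating a guest
   buffer before a syscall emulation copies through it; page_check_range()
   returns -1 if any page in the range is invalid or lacks the requested
   access.  The wrapper and its arguments are hypothetical. */
#if 0
static int example_access_ok(target_ulong guest_addr, target_ulong size,
                             int write)
{
    return page_check_range(guest_addr, size,
                            write ? PAGE_WRITE : PAGE_READ) == 0;
}
#endif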
2211 3d97b40b ths
2212 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2213 9fa3e853 bellard
   page. Return TRUE if the fault was successfully handled. */
2214 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2215 9fa3e853 bellard
{
2216 9fa3e853 bellard
    unsigned int page_index, prot, pindex;
2217 9fa3e853 bellard
    PageDesc *p, *p1;
2218 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2219 9fa3e853 bellard
2220 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2221 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2222 c8a706fe pbrook
       practice it seems to be ok.  */
2223 c8a706fe pbrook
    mmap_lock();
2224 c8a706fe pbrook
2225 83fb7adf bellard
    host_start = address & qemu_host_page_mask;
2226 9fa3e853 bellard
    page_index = host_start >> TARGET_PAGE_BITS;
2227 9fa3e853 bellard
    p1 = page_find(page_index);
2228 c8a706fe pbrook
    if (!p1) {
2229 c8a706fe pbrook
        mmap_unlock();
2230 9fa3e853 bellard
        return 0;
2231 c8a706fe pbrook
    }
2232 83fb7adf bellard
    host_end = host_start + qemu_host_page_size;
2233 9fa3e853 bellard
    p = p1;
2234 9fa3e853 bellard
    prot = 0;
2235 9fa3e853 bellard
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2236 9fa3e853 bellard
        prot |= p->flags;
2237 9fa3e853 bellard
        p++;
2238 9fa3e853 bellard
    }
2239 9fa3e853 bellard
    /* if the page was really writable, then we change its
2240 9fa3e853 bellard
       protection back to writable */
2241 9fa3e853 bellard
    if (prot & PAGE_WRITE_ORG) {
2242 9fa3e853 bellard
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2243 9fa3e853 bellard
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2244 5fafdf24 ths
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2245 9fa3e853 bellard
                     (prot & PAGE_BITS) | PAGE_WRITE);
2246 9fa3e853 bellard
            p1[pindex].flags |= PAGE_WRITE;
2247 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2248 9fa3e853 bellard
               the corresponding translated code. */
2249 d720b93d bellard
            tb_invalidate_phys_page(address, pc, puc);
2250 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2251 9fa3e853 bellard
            tb_invalidate_check(address);
2252 9fa3e853 bellard
#endif
2253 c8a706fe pbrook
            mmap_unlock();
2254 9fa3e853 bellard
            return 1;
2255 9fa3e853 bellard
        }
2256 9fa3e853 bellard
    }
2257 c8a706fe pbrook
    mmap_unlock();
2258 9fa3e853 bellard
    return 0;
2259 9fa3e853 bellard
}
2260 9fa3e853 bellard
2261 6a00d601 bellard
static inline void tlb_set_dirty(CPUState *env,
2262 6a00d601 bellard
                                 unsigned long addr, target_ulong vaddr)
2263 1ccde1cb bellard
{
2264 1ccde1cb bellard
}
2265 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2266 9fa3e853 bellard
2267 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2268 8da3ff18 pbrook
2269 db7b5426 blueswir1
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2270 8da3ff18 pbrook
                             ram_addr_t memory, ram_addr_t region_offset);
2271 00f82b8a aurel32
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2272 8da3ff18 pbrook
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2273 db7b5426 blueswir1
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2274 db7b5426 blueswir1
                      need_subpage)                                     \
2275 db7b5426 blueswir1
    do {                                                                \
2276 db7b5426 blueswir1
        if (addr > start_addr)                                          \
2277 db7b5426 blueswir1
            start_addr2 = 0;                                            \
2278 db7b5426 blueswir1
        else {                                                          \
2279 db7b5426 blueswir1
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2280 db7b5426 blueswir1
            if (start_addr2 > 0)                                        \
2281 db7b5426 blueswir1
                need_subpage = 1;                                       \
2282 db7b5426 blueswir1
        }                                                               \
2283 db7b5426 blueswir1
                                                                        \
2284 49e9fba2 blueswir1
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2285 db7b5426 blueswir1
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2286 db7b5426 blueswir1
        else {                                                          \
2287 db7b5426 blueswir1
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2288 db7b5426 blueswir1
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2289 db7b5426 blueswir1
                need_subpage = 1;                                       \
2290 db7b5426 blueswir1
        }                                                               \
2291 db7b5426 blueswir1
    } while (0)
2292 db7b5426 blueswir1
2293 33417e70 bellard
/* register physical memory. 'size' must be a multiple of the target
2294 33417e70 bellard
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2295 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2296 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2297 8da3ff18 pbrook
   start_addr and region_offset are rounded down to a page boundary
2298 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2299 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
2300 8da3ff18 pbrook
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2301 8da3ff18 pbrook
                                         ram_addr_t size,
2302 8da3ff18 pbrook
                                         ram_addr_t phys_offset,
2303 8da3ff18 pbrook
                                         ram_addr_t region_offset)
2304 33417e70 bellard
{
2305 108c49b8 bellard
    target_phys_addr_t addr, end_addr;
2306 92e873b9 bellard
    PhysPageDesc *p;
2307 9d42037b bellard
    CPUState *env;
2308 00f82b8a aurel32
    ram_addr_t orig_size = size;
2309 db7b5426 blueswir1
    void *subpage;
2310 33417e70 bellard
2311 da260249 bellard
#ifdef USE_KQEMU
2312 da260249 bellard
    /* XXX: should not depend on cpu context */
2313 da260249 bellard
    env = first_cpu;
2314 da260249 bellard
    if (env->kqemu_enabled) {
2315 da260249 bellard
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2316 da260249 bellard
    }
2317 da260249 bellard
#endif
2318 7ba1e619 aliguori
    if (kvm_enabled())
2319 7ba1e619 aliguori
        kvm_set_phys_mem(start_addr, size, phys_offset);
2320 7ba1e619 aliguori
2321 67c4d23c pbrook
    if (phys_offset == IO_MEM_UNASSIGNED) {
2322 67c4d23c pbrook
        region_offset = start_addr;
2323 67c4d23c pbrook
    }
2324 8da3ff18 pbrook
    region_offset &= TARGET_PAGE_MASK;
2325 5fd386f6 bellard
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2326 49e9fba2 blueswir1
    end_addr = start_addr + (target_phys_addr_t)size;
2327 49e9fba2 blueswir1
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2328 db7b5426 blueswir1
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2329 db7b5426 blueswir1
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2330 00f82b8a aurel32
            ram_addr_t orig_memory = p->phys_offset;
2331 db7b5426 blueswir1
            target_phys_addr_t start_addr2, end_addr2;
2332 db7b5426 blueswir1
            int need_subpage = 0;
2333 db7b5426 blueswir1
2334 db7b5426 blueswir1
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2335 db7b5426 blueswir1
                          need_subpage);
2336 4254fab8 blueswir1
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2337 db7b5426 blueswir1
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2338 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2339 8da3ff18 pbrook
                                           &p->phys_offset, orig_memory,
2340 8da3ff18 pbrook
                                           p->region_offset);
2341 db7b5426 blueswir1
                } else {
2342 db7b5426 blueswir1
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2343 db7b5426 blueswir1
                                            >> IO_MEM_SHIFT];
2344 db7b5426 blueswir1
                }
2345 8da3ff18 pbrook
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2346 8da3ff18 pbrook
                                 region_offset);
2347 8da3ff18 pbrook
                p->region_offset = 0;
2348 db7b5426 blueswir1
            } else {
2349 db7b5426 blueswir1
                p->phys_offset = phys_offset;
2350 db7b5426 blueswir1
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2351 db7b5426 blueswir1
                    (phys_offset & IO_MEM_ROMD))
2352 db7b5426 blueswir1
                    phys_offset += TARGET_PAGE_SIZE;
2353 db7b5426 blueswir1
            }
2354 db7b5426 blueswir1
        } else {
2355 db7b5426 blueswir1
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2356 db7b5426 blueswir1
            p->phys_offset = phys_offset;
2357 8da3ff18 pbrook
            p->region_offset = region_offset;
2358 db7b5426 blueswir1
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2359 8da3ff18 pbrook
                (phys_offset & IO_MEM_ROMD)) {
2360 db7b5426 blueswir1
                phys_offset += TARGET_PAGE_SIZE;
2361 0e8f0967 pbrook
            } else {
2362 db7b5426 blueswir1
                target_phys_addr_t start_addr2, end_addr2;
2363 db7b5426 blueswir1
                int need_subpage = 0;
2364 db7b5426 blueswir1
2365 db7b5426 blueswir1
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2366 db7b5426 blueswir1
                              end_addr2, need_subpage);
2367 db7b5426 blueswir1
2368 4254fab8 blueswir1
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2369 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2370 8da3ff18 pbrook
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2371 67c4d23c pbrook
                                           addr & TARGET_PAGE_MASK);
2372 db7b5426 blueswir1
                    subpage_register(subpage, start_addr2, end_addr2,
2373 8da3ff18 pbrook
                                     phys_offset, region_offset);
2374 8da3ff18 pbrook
                    p->region_offset = 0;
2375 db7b5426 blueswir1
                }
2376 db7b5426 blueswir1
            }
2377 db7b5426 blueswir1
        }
2378 8da3ff18 pbrook
        region_offset += TARGET_PAGE_SIZE;
2379 33417e70 bellard
    }
2380 3b46e624 ths
2381 9d42037b bellard
    /* since each CPU stores ram addresses in its TLB cache, we must
2382 9d42037b bellard
       reset the modified entries */
2383 9d42037b bellard
    /* XXX: slow! */
2384 9d42037b bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2385 9d42037b bellard
        tlb_flush(env, 1);
2386 9d42037b bellard
    }
2387 33417e70 bellard
}
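
/* Illustrative usage sketch (not part of exec.c): a board-init routine
   could back part of the guest physical address space with RAM by
   allocating a block (qemu_ram_alloc(), defined below) and registering
   it here.  The base address and size are invented for this example;
   IO_MEM_RAM marks the region as ordinary RAM. */
#if 0
static void example_register_ram(void)
{
    const target_phys_addr_t base = 0x10000000;  /* hypothetical base */
    const ram_addr_t size = 0x00800000;          /* 8 MB, for example */
    ram_addr_t offset = qemu_ram_alloc(size);

    /* region_offset 0: IO offset (unused for RAM) equals the page offset */
    cpu_register_physical_memory_offset(base, size, offset | IO_MEM_RAM, 0);
}
#endif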
2388 33417e70 bellard
2389 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2390 00f82b8a aurel32
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2391 ba863458 bellard
{
2392 ba863458 bellard
    PhysPageDesc *p;
2393 ba863458 bellard
2394 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2395 ba863458 bellard
    if (!p)
2396 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2397 ba863458 bellard
    return p->phys_offset;
2398 ba863458 bellard
}
2399 ba863458 bellard
2400 f65ed4c1 aliguori
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2401 f65ed4c1 aliguori
{
2402 f65ed4c1 aliguori
    if (kvm_enabled())
2403 f65ed4c1 aliguori
        kvm_coalesce_mmio_region(addr, size);
2404 f65ed4c1 aliguori
}
2405 f65ed4c1 aliguori
2406 f65ed4c1 aliguori
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2407 f65ed4c1 aliguori
{
2408 f65ed4c1 aliguori
    if (kvm_enabled())
2409 f65ed4c1 aliguori
        kvm_uncoalesce_mmio_region(addr, size);
2410 f65ed4c1 aliguori
}
2411 f65ed4c1 aliguori
2412 e9a1ab19 bellard
/* XXX: better than nothing */
2413 00f82b8a aurel32
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2414 e9a1ab19 bellard
{
2415 e9a1ab19 bellard
    ram_addr_t addr;
2416 7fb4fdcf balrog
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2417 012a7045 ths
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2418 ed441467 bellard
                (uint64_t)size, (uint64_t)phys_ram_size);
2419 e9a1ab19 bellard
        abort();
2420 e9a1ab19 bellard
    }
2421 e9a1ab19 bellard
    addr = phys_ram_alloc_offset;
2422 e9a1ab19 bellard
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2423 e9a1ab19 bellard
    return addr;
2424 e9a1ab19 bellard
}
2425 e9a1ab19 bellard
2426 e9a1ab19 bellard
void qemu_ram_free(ram_addr_t addr)
2427 e9a1ab19 bellard
{
2428 e9a1ab19 bellard
}
2429 e9a1ab19 bellard
2430 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2431 dc828ca1 pbrook
   This may only be used if you actually allocated the ram, and
2432 dc828ca1 pbrook
   already know how big the ram block is.  */
2433 dc828ca1 pbrook
void *qemu_get_ram_ptr(ram_addr_t addr)
2434 dc828ca1 pbrook
{
2435 dc828ca1 pbrook
    return phys_ram_base + addr;
2436 dc828ca1 pbrook
}
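
/* Illustrative sketch (not part of exec.c): code that allocated a RAM
   block itself can take a host pointer into it and initialise it
   directly; the one-page size is an arbitrary example value. */
#if 0
static void example_clear_ram_block(void)
{
    ram_addr_t block = qemu_ram_alloc(TARGET_PAGE_SIZE);
    uint8_t *host = qemu_get_ram_ptr(block);

    memset(host, 0, TARGET_PAGE_SIZE);  /* host-side access to guest RAM */
}
#endif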
2437 dc828ca1 pbrook
2438 a4193c8a bellard
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2439 33417e70 bellard
{
2440 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2441 ab3d1727 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2442 67d3b957 pbrook
#endif
2443 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2444 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 1);
2445 e18231a3 blueswir1
#endif
2446 e18231a3 blueswir1
    return 0;
2447 e18231a3 blueswir1
}
2448 e18231a3 blueswir1
2449 e18231a3 blueswir1
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2450 e18231a3 blueswir1
{
2451 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2452 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2453 e18231a3 blueswir1
#endif
2454 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2455 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 2);
2456 e18231a3 blueswir1
#endif
2457 e18231a3 blueswir1
    return 0;
2458 e18231a3 blueswir1
}
2459 e18231a3 blueswir1
2460 e18231a3 blueswir1
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2461 e18231a3 blueswir1
{
2462 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2463 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2464 e18231a3 blueswir1
#endif
2465 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2466 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 4);
2467 b4f0a316 blueswir1
#endif
2468 33417e70 bellard
    return 0;
2469 33417e70 bellard
}
2470 33417e70 bellard
2471 a4193c8a bellard
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2472 33417e70 bellard
{
2473 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2474 ab3d1727 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2475 67d3b957 pbrook
#endif
2476 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2477 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 1);
2478 e18231a3 blueswir1
#endif
2479 e18231a3 blueswir1
}
2480 e18231a3 blueswir1
2481 e18231a3 blueswir1
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2482 e18231a3 blueswir1
{
2483 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2484 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2485 e18231a3 blueswir1
#endif
2486 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2487 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 2);
2488 e18231a3 blueswir1
#endif
2489 e18231a3 blueswir1
}
2490 e18231a3 blueswir1
2491 e18231a3 blueswir1
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2492 e18231a3 blueswir1
{
2493 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2494 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2495 e18231a3 blueswir1
#endif
2496 0a6f8a6d edgar_igl
#if defined(TARGET_SPARC)
2497 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 4);
2498 b4f0a316 blueswir1
#endif
2499 33417e70 bellard
}
2500 33417e70 bellard
2501 33417e70 bellard
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2502 33417e70 bellard
    unassigned_mem_readb,
2503 e18231a3 blueswir1
    unassigned_mem_readw,
2504 e18231a3 blueswir1
    unassigned_mem_readl,
2505 33417e70 bellard
};
2506 33417e70 bellard
2507 33417e70 bellard
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2508 33417e70 bellard
    unassigned_mem_writeb,
2509 e18231a3 blueswir1
    unassigned_mem_writew,
2510 e18231a3 blueswir1
    unassigned_mem_writel,
2511 33417e70 bellard
};
2512 33417e70 bellard
2513 0f459d16 pbrook
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2514 0f459d16 pbrook
                                uint32_t val)
2515 9fa3e853 bellard
{
2516 3a7d929e bellard
    int dirty_flags;
2517 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2518 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2519 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2520 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 1);
2521 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2522 9fa3e853 bellard
#endif
2523 3a7d929e bellard
    }
2524 0f459d16 pbrook
    stb_p(phys_ram_base + ram_addr, val);
2525 f32fc648 bellard
#ifdef USE_KQEMU
2526 f32fc648 bellard
    if (cpu_single_env->kqemu_enabled &&
2527 f32fc648 bellard
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2528 f32fc648 bellard
        kqemu_modify_page(cpu_single_env, ram_addr);
2529 f32fc648 bellard
#endif
2530 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2531 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2532 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2533 f23db169 bellard
       flushed */
2534 f23db169 bellard
    if (dirty_flags == 0xff)
2535 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2536 9fa3e853 bellard
}
2537 9fa3e853 bellard
2538 0f459d16 pbrook
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2539 0f459d16 pbrook
                                uint32_t val)
2540 9fa3e853 bellard
{
2541 3a7d929e bellard
    int dirty_flags;
2542 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2543 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2544 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2545 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 2);
2546 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2547 9fa3e853 bellard
#endif
2548 3a7d929e bellard
    }
2549 0f459d16 pbrook
    stw_p(phys_ram_base + ram_addr, val);
2550 f32fc648 bellard
#ifdef USE_KQEMU
2551 f32fc648 bellard
    if (cpu_single_env->kqemu_enabled &&
2552 f32fc648 bellard
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2553 f32fc648 bellard
        kqemu_modify_page(cpu_single_env, ram_addr);
2554 f32fc648 bellard
#endif
2555 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2556 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2557 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2558 f23db169 bellard
       flushed */
2559 f23db169 bellard
    if (dirty_flags == 0xff)
2560 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2561 9fa3e853 bellard
}
2562 9fa3e853 bellard
2563 0f459d16 pbrook
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2564 0f459d16 pbrook
                                uint32_t val)
2565 9fa3e853 bellard
{
2566 3a7d929e bellard
    int dirty_flags;
2567 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2568 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2569 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2570 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
2571 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2572 9fa3e853 bellard
#endif
2573 3a7d929e bellard
    }
2574 0f459d16 pbrook
    stl_p(phys_ram_base + ram_addr, val);
2575 f32fc648 bellard
#ifdef USE_KQEMU
2576 f32fc648 bellard
    if (cpu_single_env->kqemu_enabled &&
2577 f32fc648 bellard
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2578 f32fc648 bellard
        kqemu_modify_page(cpu_single_env, ram_addr);
2579 f32fc648 bellard
#endif
2580 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2581 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2582 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2583 f23db169 bellard
       flushed */
2584 f23db169 bellard
    if (dirty_flags == 0xff)
2585 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2586 9fa3e853 bellard
}
2587 9fa3e853 bellard
2588 3a7d929e bellard
static CPUReadMemoryFunc *error_mem_read[3] = {
2589 9fa3e853 bellard
    NULL, /* never used */
2590 9fa3e853 bellard
    NULL, /* never used */
2591 9fa3e853 bellard
    NULL, /* never used */
2592 9fa3e853 bellard
};
2593 9fa3e853 bellard
2594 1ccde1cb bellard
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2595 1ccde1cb bellard
    notdirty_mem_writeb,
2596 1ccde1cb bellard
    notdirty_mem_writew,
2597 1ccde1cb bellard
    notdirty_mem_writel,
2598 1ccde1cb bellard
};
2599 1ccde1cb bellard
2600 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
2601 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
2602 0f459d16 pbrook
{
2603 0f459d16 pbrook
    CPUState *env = cpu_single_env;
2604 06d55cc1 aliguori
    target_ulong pc, cs_base;
2605 06d55cc1 aliguori
    TranslationBlock *tb;
2606 0f459d16 pbrook
    target_ulong vaddr;
2607 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2608 06d55cc1 aliguori
    int cpu_flags;
2609 0f459d16 pbrook
2610 06d55cc1 aliguori
    if (env->watchpoint_hit) {
2611 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
2612 06d55cc1 aliguori
         * the debug interrupt so that it will trigger after the
2613 06d55cc1 aliguori
         * current instruction. */
2614 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2615 06d55cc1 aliguori
        return;
2616 06d55cc1 aliguori
    }
2617 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2618 c0ce998e aliguori
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2619 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
2620 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2621 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
2622 6e140f28 aliguori
            if (!env->watchpoint_hit) {
2623 6e140f28 aliguori
                env->watchpoint_hit = wp;
2624 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
2625 6e140f28 aliguori
                if (!tb) {
2626 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2627 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
2628 6e140f28 aliguori
                }
2629 6e140f28 aliguori
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2630 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
2631 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2632 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
2633 6e140f28 aliguori
                } else {
2634 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2635 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2636 6e140f28 aliguori
                }
2637 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
2638 06d55cc1 aliguori
            }
2639 6e140f28 aliguori
        } else {
2640 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
2641 0f459d16 pbrook
        }
2642 0f459d16 pbrook
    }
2643 0f459d16 pbrook
}
2644 0f459d16 pbrook
2645 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2646 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
2647 6658ffb8 pbrook
   phys routines.  */
2648 6658ffb8 pbrook
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2649 6658ffb8 pbrook
{
2650 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2651 6658ffb8 pbrook
    return ldub_phys(addr);
2652 6658ffb8 pbrook
}
2653 6658ffb8 pbrook
2654 6658ffb8 pbrook
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2655 6658ffb8 pbrook
{
2656 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2657 6658ffb8 pbrook
    return lduw_phys(addr);
2658 6658ffb8 pbrook
}
2659 6658ffb8 pbrook
2660 6658ffb8 pbrook
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2661 6658ffb8 pbrook
{
2662 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2663 6658ffb8 pbrook
    return ldl_phys(addr);
2664 6658ffb8 pbrook
}
2665 6658ffb8 pbrook
2666 6658ffb8 pbrook
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2667 6658ffb8 pbrook
                             uint32_t val)
2668 6658ffb8 pbrook
{
2669 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2670 6658ffb8 pbrook
    stb_phys(addr, val);
2671 6658ffb8 pbrook
}
2672 6658ffb8 pbrook
2673 6658ffb8 pbrook
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2674 6658ffb8 pbrook
                             uint32_t val)
2675 6658ffb8 pbrook
{
2676 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2677 6658ffb8 pbrook
    stw_phys(addr, val);
2678 6658ffb8 pbrook
}
2679 6658ffb8 pbrook
2680 6658ffb8 pbrook
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2681 6658ffb8 pbrook
                             uint32_t val)
2682 6658ffb8 pbrook
{
2683 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2684 6658ffb8 pbrook
    stl_phys(addr, val);
2685 6658ffb8 pbrook
}
2686 6658ffb8 pbrook
2687 6658ffb8 pbrook
static CPUReadMemoryFunc *watch_mem_read[3] = {
2688 6658ffb8 pbrook
    watch_mem_readb,
2689 6658ffb8 pbrook
    watch_mem_readw,
2690 6658ffb8 pbrook
    watch_mem_readl,
2691 6658ffb8 pbrook
};
2692 6658ffb8 pbrook
2693 6658ffb8 pbrook
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2694 6658ffb8 pbrook
    watch_mem_writeb,
2695 6658ffb8 pbrook
    watch_mem_writew,
2696 6658ffb8 pbrook
    watch_mem_writel,
2697 6658ffb8 pbrook
};
2698 6658ffb8 pbrook
2699 db7b5426 blueswir1
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2700 db7b5426 blueswir1
                                 unsigned int len)
2701 db7b5426 blueswir1
{
2702 db7b5426 blueswir1
    uint32_t ret;
2703 db7b5426 blueswir1
    unsigned int idx;
2704 db7b5426 blueswir1
2705 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2706 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2707 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2708 db7b5426 blueswir1
           mmio, len, addr, idx);
2709 db7b5426 blueswir1
#endif
2710 8da3ff18 pbrook
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2711 8da3ff18 pbrook
                                       addr + mmio->region_offset[idx][0][len]);
2712 db7b5426 blueswir1
2713 db7b5426 blueswir1
    return ret;
2714 db7b5426 blueswir1
}
2715 db7b5426 blueswir1
2716 db7b5426 blueswir1
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2717 db7b5426 blueswir1
                              uint32_t value, unsigned int len)
2718 db7b5426 blueswir1
{
2719 db7b5426 blueswir1
    unsigned int idx;
2720 db7b5426 blueswir1
2721 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2722 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2723 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2724 db7b5426 blueswir1
           mmio, len, addr, idx, value);
2725 db7b5426 blueswir1
#endif
2726 8da3ff18 pbrook
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2727 8da3ff18 pbrook
                                  addr + mmio->region_offset[idx][1][len],
2728 8da3ff18 pbrook
                                  value);
2729 db7b5426 blueswir1
}
2730 db7b5426 blueswir1
2731 db7b5426 blueswir1
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2732 db7b5426 blueswir1
{
2733 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2734 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2735 db7b5426 blueswir1
#endif
2736 db7b5426 blueswir1
2737 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 0);
2738 db7b5426 blueswir1
}
2739 db7b5426 blueswir1
2740 db7b5426 blueswir1
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2741 db7b5426 blueswir1
                            uint32_t value)
2742 db7b5426 blueswir1
{
2743 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2744 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2745 db7b5426 blueswir1
#endif
2746 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
2747 db7b5426 blueswir1
}
2748 db7b5426 blueswir1
2749 db7b5426 blueswir1
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2750 db7b5426 blueswir1
{
2751 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2752 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2753 db7b5426 blueswir1
#endif
2754 db7b5426 blueswir1
2755 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 1);
2756 db7b5426 blueswir1
}
2757 db7b5426 blueswir1
2758 db7b5426 blueswir1
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2759 db7b5426 blueswir1
                            uint32_t value)
2760 db7b5426 blueswir1
{
2761 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2762 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2763 db7b5426 blueswir1
#endif
2764 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
2765 db7b5426 blueswir1
}
2766 db7b5426 blueswir1
2767 db7b5426 blueswir1
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2768 db7b5426 blueswir1
{
2769 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2770 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2771 db7b5426 blueswir1
#endif
2772 db7b5426 blueswir1
2773 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 2);
2774 db7b5426 blueswir1
}
2775 db7b5426 blueswir1
2776 db7b5426 blueswir1
static void subpage_writel (void *opaque,
2777 db7b5426 blueswir1
                         target_phys_addr_t addr, uint32_t value)
2778 db7b5426 blueswir1
{
2779 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2780 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2781 db7b5426 blueswir1
#endif
2782 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
2783 db7b5426 blueswir1
}
2784 db7b5426 blueswir1
2785 db7b5426 blueswir1
static CPUReadMemoryFunc *subpage_read[] = {
2786 db7b5426 blueswir1
    &subpage_readb,
2787 db7b5426 blueswir1
    &subpage_readw,
2788 db7b5426 blueswir1
    &subpage_readl,
2789 db7b5426 blueswir1
};
2790 db7b5426 blueswir1
2791 db7b5426 blueswir1
static CPUWriteMemoryFunc *subpage_write[] = {
2792 db7b5426 blueswir1
    &subpage_writeb,
2793 db7b5426 blueswir1
    &subpage_writew,
2794 db7b5426 blueswir1
    &subpage_writel,
2795 db7b5426 blueswir1
};
2796 db7b5426 blueswir1
2797 db7b5426 blueswir1
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2798 8da3ff18 pbrook
                             ram_addr_t memory, ram_addr_t region_offset)
2799 db7b5426 blueswir1
{
2800 db7b5426 blueswir1
    int idx, eidx;
2801 4254fab8 blueswir1
    unsigned int i;
2802 db7b5426 blueswir1
2803 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2804 db7b5426 blueswir1
        return -1;
2805 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
2806 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
2807 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2808 db7b5426 blueswir1
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2809 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
2810 db7b5426 blueswir1
#endif
2811 db7b5426 blueswir1
    memory >>= IO_MEM_SHIFT;
2812 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
2813 4254fab8 blueswir1
        for (i = 0; i < 4; i++) {
2814 3ee89922 blueswir1
            if (io_mem_read[memory][i]) {
2815 3ee89922 blueswir1
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2816 3ee89922 blueswir1
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2817 8da3ff18 pbrook
                mmio->region_offset[idx][0][i] = region_offset;
2818 3ee89922 blueswir1
            }
2819 3ee89922 blueswir1
            if (io_mem_write[memory][i]) {
2820 3ee89922 blueswir1
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2821 3ee89922 blueswir1
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2822 8da3ff18 pbrook
                mmio->region_offset[idx][1][i] = region_offset;
2823 3ee89922 blueswir1
            }
2824 4254fab8 blueswir1
        }
2825 db7b5426 blueswir1
    }
2826 db7b5426 blueswir1
2827 db7b5426 blueswir1
    return 0;
2828 db7b5426 blueswir1
}
2829 db7b5426 blueswir1
2830 00f82b8a aurel32
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2831 8da3ff18 pbrook
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2832 db7b5426 blueswir1
{
2833 db7b5426 blueswir1
    subpage_t *mmio;
2834 db7b5426 blueswir1
    int subpage_memory;
2835 db7b5426 blueswir1
2836 db7b5426 blueswir1
    mmio = qemu_mallocz(sizeof(subpage_t));
2837 1eec614b aliguori
2838 1eec614b aliguori
    mmio->base = base;
2839 1eec614b aliguori
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2840 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2841 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2842 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2843 db7b5426 blueswir1
#endif
2844 1eec614b aliguori
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2845 1eec614b aliguori
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2846 8da3ff18 pbrook
                         region_offset);
2847 db7b5426 blueswir1
2848 db7b5426 blueswir1
    return mmio;
2849 db7b5426 blueswir1
}
2850 db7b5426 blueswir1
2851 88715657 aliguori
static int get_free_io_mem_idx(void)
2852 88715657 aliguori
{
2853 88715657 aliguori
    int i;
2854 88715657 aliguori
2855 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2856 88715657 aliguori
        if (!io_mem_used[i]) {
2857 88715657 aliguori
            io_mem_used[i] = 1;
2858 88715657 aliguori
            return i;
2859 88715657 aliguori
        }
2860 88715657 aliguori
2861 88715657 aliguori
    return -1;
2862 88715657 aliguori
}
2863 88715657 aliguori
2864 33417e70 bellard
static void io_mem_init(void)
2865 33417e70 bellard
{
2866 88715657 aliguori
    int i;
2867 88715657 aliguori
2868 3a7d929e bellard
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2869 a4193c8a bellard
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2870 3a7d929e bellard
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2871 88715657 aliguori
    for (i=0; i<5; i++)
2872 88715657 aliguori
        io_mem_used[i] = 1;
2873 1ccde1cb bellard
2874 0f459d16 pbrook
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2875 6658ffb8 pbrook
                                          watch_mem_write, NULL);
2876 1ccde1cb bellard
    /* alloc dirty bits array */
2877 0a962c02 bellard
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2878 3a7d929e bellard
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2879 33417e70 bellard
}
2880 33417e70 bellard
2881 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
2882 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
2883 3ee89922 blueswir1
   2). Functions can be omitted with a NULL function pointer. The
2884 3ee89922 blueswir1
   registered functions may be modified dynamically later.
2885 3ee89922 blueswir1
   If io_index is non zero, the corresponding io zone is
2886 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
2887 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
2888 4254fab8 blueswir1
   returned on error. */
2889 33417e70 bellard
int cpu_register_io_memory(int io_index,
2890 33417e70 bellard
                           CPUReadMemoryFunc **mem_read,
2891 a4193c8a bellard
                           CPUWriteMemoryFunc **mem_write,
2892 a4193c8a bellard
                           void *opaque)
2893 33417e70 bellard
{
2894 4254fab8 blueswir1
    int i, subwidth = 0;
2895 33417e70 bellard
2896 33417e70 bellard
    if (io_index <= 0) {
2897 88715657 aliguori
        io_index = get_free_io_mem_idx();
2898 88715657 aliguori
        if (io_index == -1)
2899 88715657 aliguori
            return io_index;
2900 33417e70 bellard
    } else {
2901 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
2902 33417e70 bellard
            return -1;
2903 33417e70 bellard
    }
2904 b5ff1b31 bellard
2905 33417e70 bellard
    for(i = 0;i < 3; i++) {
2906 4254fab8 blueswir1
        if (!mem_read[i] || !mem_write[i])
2907 4254fab8 blueswir1
            subwidth = IO_MEM_SUBWIDTH;
2908 33417e70 bellard
        io_mem_read[io_index][i] = mem_read[i];
2909 33417e70 bellard
        io_mem_write[io_index][i] = mem_write[i];
2910 33417e70 bellard
    }
2911 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
2912 4254fab8 blueswir1
    return (io_index << IO_MEM_SHIFT) | subwidth;
2913 33417e70 bellard
}
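
/* Illustrative sketch (not part of exec.c): a minimal MMIO region exposing
   a single 32-bit register.  The device name, register layout and init
   helper are assumptions made up for this example. */
#if 0
typedef struct {
    uint32_t reg;
} ExampleMMIOState;

static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleMMIOState *s = opaque;
    return s->reg;                          /* same value at every offset */
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    ExampleMMIOState *s = opaque;
    s->reg = val;
}

/* only 32-bit accesses are implemented; the NULL byte/word entries make
   cpu_register_io_memory() set IO_MEM_SUBWIDTH in the returned value */
static CPUReadMemoryFunc *example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,
};
static CPUWriteMemoryFunc *example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

static void example_mmio_init(target_phys_addr_t base)
{
    ExampleMMIOState *s = qemu_mallocz(sizeof(*s));
    int io = cpu_register_io_memory(0, example_mmio_read,
                                    example_mmio_write, s);
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
}
#endif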
2914 61382a50 bellard
2915 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
2916 88715657 aliguori
{
2917 88715657 aliguori
    int i;
2918 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
2919 88715657 aliguori
2920 88715657 aliguori
    for (i=0;i < 3; i++) {
2921 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2922 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2923 88715657 aliguori
    }
2924 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
2925 88715657 aliguori
    io_mem_used[io_index] = 0;
2926 88715657 aliguori
}
2927 88715657 aliguori
2928 8926b517 bellard
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2929 8926b517 bellard
{
2930 8926b517 bellard
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2931 8926b517 bellard
}
2932 8926b517 bellard
2933 8926b517 bellard
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2934 8926b517 bellard
{
2935 8926b517 bellard
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2936 8926b517 bellard
}
2937 8926b517 bellard
2938 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
2939 e2eef170 pbrook
2940 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
2941 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
2942 5fafdf24 ths
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2943 13eb76e0 bellard
                            int len, int is_write)
2944 13eb76e0 bellard
{
2945 13eb76e0 bellard
    int l, flags;
2946 13eb76e0 bellard
    target_ulong page;
2947 53a5960a pbrook
    void * p;
2948 13eb76e0 bellard
2949 13eb76e0 bellard
    while (len > 0) {
2950 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
2951 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
2952 13eb76e0 bellard
        if (l > len)
2953 13eb76e0 bellard
            l = len;
2954 13eb76e0 bellard
        flags = page_get_flags(page);
2955 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
2956 13eb76e0 bellard
            return;
2957 13eb76e0 bellard
        if (is_write) {
2958 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
2959 13eb76e0 bellard
                return;
2960 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
2961 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2962 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
2963 579a97f7 bellard
                return;
2964 72fb7daa aurel32
            memcpy(p, buf, l);
2965 72fb7daa aurel32
            unlock_user(p, addr, l);
2966 13eb76e0 bellard
        } else {
2967 13eb76e0 bellard
            if (!(flags & PAGE_READ))
2968 13eb76e0 bellard
                return;
2969 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
2970 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2971 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
2972 579a97f7 bellard
                return;
2973 72fb7daa aurel32
            memcpy(buf, p, l);
2974 5b257578 aurel32
            unlock_user(p, addr, 0);
2975 13eb76e0 bellard
        }
2976 13eb76e0 bellard
        len -= l;
2977 13eb76e0 bellard
        buf += l;
2978 13eb76e0 bellard
        addr += l;
2979 13eb76e0 bellard
    }
2980 13eb76e0 bellard
}
2981 8df1cd07 bellard
2982 13eb76e0 bellard
#else
2983 5fafdf24 ths
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2984 13eb76e0 bellard
                            int len, int is_write)
2985 13eb76e0 bellard
{
2986 13eb76e0 bellard
    int l, io_index;
2987 13eb76e0 bellard
    uint8_t *ptr;
2988 13eb76e0 bellard
    uint32_t val;
2989 2e12669a bellard
    target_phys_addr_t page;
2990 2e12669a bellard
    unsigned long pd;
2991 92e873b9 bellard
    PhysPageDesc *p;
2992 3b46e624 ths
2993 13eb76e0 bellard
    while (len > 0) {
2994 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
2995 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
2996 13eb76e0 bellard
        if (l > len)
2997 13eb76e0 bellard
            l = len;
2998 92e873b9 bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2999 13eb76e0 bellard
        if (!p) {
3000 13eb76e0 bellard
            pd = IO_MEM_UNASSIGNED;
3001 13eb76e0 bellard
        } else {
3002 13eb76e0 bellard
            pd = p->phys_offset;
3003 13eb76e0 bellard
        }
3004 3b46e624 ths
3005 13eb76e0 bellard
        if (is_write) {
3006 3a7d929e bellard
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3007 6c2934db aurel32
                target_phys_addr_t addr1 = addr;
3008 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3009 8da3ff18 pbrook
                if (p)
3010 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3011 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
3012 6a00d601 bellard
                   potential bugs */
3013 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3014 1c213d19 bellard
                    /* 32 bit write access */
3015 c27004ec bellard
                    val = ldl_p(buf);
3016 6c2934db aurel32
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3017 13eb76e0 bellard
                    l = 4;
3018 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3019 1c213d19 bellard
                    /* 16 bit write access */
3020 c27004ec bellard
                    val = lduw_p(buf);
3021 6c2934db aurel32
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3022 13eb76e0 bellard
                    l = 2;
3023 13eb76e0 bellard
                } else {
3024 1c213d19 bellard
                    /* 8 bit write access */
3025 c27004ec bellard
                    val = ldub_p(buf);
3026 6c2934db aurel32
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3027 13eb76e0 bellard
                    l = 1;
3028 13eb76e0 bellard
                }
3029 13eb76e0 bellard
            } else {
3030 b448f2f3 bellard
                unsigned long addr1;
3031 b448f2f3 bellard
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3032 13eb76e0 bellard
                /* RAM case */
3033 b448f2f3 bellard
                ptr = phys_ram_base + addr1;
3034 13eb76e0 bellard
                memcpy(ptr, buf, l);
3035 3a7d929e bellard
                if (!cpu_physical_memory_is_dirty(addr1)) {
3036 3a7d929e bellard
                    /* invalidate code */
3037 3a7d929e bellard
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3038 3a7d929e bellard
                    /* set dirty bit */
3039 5fafdf24 ths
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3040 f23db169 bellard
                        (0xff & ~CODE_DIRTY_FLAG);
3041 3a7d929e bellard
                }
3042 13eb76e0 bellard
            }
3043 13eb76e0 bellard
        } else {
3044 5fafdf24 ths
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3045 2a4188a3 bellard
                !(pd & IO_MEM_ROMD)) {
3046 6c2934db aurel32
                target_phys_addr_t addr1 = addr;
3047 13eb76e0 bellard
                /* I/O case */
3048 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3049 8da3ff18 pbrook
                if (p)
3050 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3051 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3052 13eb76e0 bellard
                    /* 32 bit read access */
3053 6c2934db aurel32
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3054 c27004ec bellard
                    stl_p(buf, val);
3055 13eb76e0 bellard
                    l = 4;
3056 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3057 13eb76e0 bellard
                    /* 16 bit read access */
3058 6c2934db aurel32
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3059 c27004ec bellard
                    stw_p(buf, val);
3060 13eb76e0 bellard
                    l = 2;
3061 13eb76e0 bellard
                } else {
3062 1c213d19 bellard
                    /* 8 bit read access */
3063 6c2934db aurel32
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3064 c27004ec bellard
                    stb_p(buf, val);
3065 13eb76e0 bellard
                    l = 1;
3066 13eb76e0 bellard
                }
3067 13eb76e0 bellard
            } else {
3068 13eb76e0 bellard
                /* RAM case */
3069 5fafdf24 ths
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3070 13eb76e0 bellard
                    (addr & ~TARGET_PAGE_MASK);
3071 13eb76e0 bellard
                memcpy(buf, ptr, l);
3072 13eb76e0 bellard
            }
3073 13eb76e0 bellard
        }
3074 13eb76e0 bellard
        len -= l;
3075 13eb76e0 bellard
        buf += l;
3076 13eb76e0 bellard
        addr += l;
3077 13eb76e0 bellard
    }
3078 13eb76e0 bellard
}
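
/* Illustrative sketch (not part of exec.c): copying a buffer into guest
   physical memory through the slow path and reading it back.  The guest
   address is an arbitrary example value. */
#if 0
static void example_phys_copy(void)
{
    static const uint8_t pattern[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t check[4];
    target_phys_addr_t dest = 0x1000;       /* hypothetical guest address */

    cpu_physical_memory_write(dest, pattern, sizeof(pattern));
    cpu_physical_memory_read(dest, check, sizeof(check));
    /* for plain RAM, 'check' now matches 'pattern'; for MMIO it holds
       whatever the device callbacks returned */
}
#endif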
3079 8df1cd07 bellard
3080 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
3081 5fafdf24 ths
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3082 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3083 d0ecd2aa bellard
{
3084 d0ecd2aa bellard
    int l;
3085 d0ecd2aa bellard
    uint8_t *ptr;
3086 d0ecd2aa bellard
    target_phys_addr_t page;
3087 d0ecd2aa bellard
    unsigned long pd;
3088 d0ecd2aa bellard
    PhysPageDesc *p;
3089 3b46e624 ths
3090 d0ecd2aa bellard
    while (len > 0) {
3091 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3092 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3093 d0ecd2aa bellard
        if (l > len)
3094 d0ecd2aa bellard
            l = len;
3095 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3096 d0ecd2aa bellard
        if (!p) {
3097 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3098 d0ecd2aa bellard
        } else {
3099 d0ecd2aa bellard
            pd = p->phys_offset;
3100 d0ecd2aa bellard
        }
3101 3b46e624 ths
3102 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3103 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3104 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3105 d0ecd2aa bellard
            /* do nothing */
3106 d0ecd2aa bellard
        } else {
3107 d0ecd2aa bellard
            unsigned long addr1;
3108 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3109 d0ecd2aa bellard
            /* ROM/RAM case */
3110 d0ecd2aa bellard
            ptr = phys_ram_base + addr1;
3111 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3112 d0ecd2aa bellard
        }
3113 d0ecd2aa bellard
        len -= l;
3114 d0ecd2aa bellard
        buf += l;
3115 d0ecd2aa bellard
        addr += l;
3116 d0ecd2aa bellard
    }
3117 d0ecd2aa bellard
}
3118 d0ecd2aa bellard
3119 6d16c2f8 aliguori
typedef struct {
3120 6d16c2f8 aliguori
    void *buffer;
3121 6d16c2f8 aliguori
    target_phys_addr_t addr;
3122 6d16c2f8 aliguori
    target_phys_addr_t len;
3123 6d16c2f8 aliguori
} BounceBuffer;
3124 6d16c2f8 aliguori
3125 6d16c2f8 aliguori
static BounceBuffer bounce;
3126 6d16c2f8 aliguori
3127 ba223c29 aliguori
typedef struct MapClient {
3128 ba223c29 aliguori
    void *opaque;
3129 ba223c29 aliguori
    void (*callback)(void *opaque);
3130 ba223c29 aliguori
    LIST_ENTRY(MapClient) link;
3131 ba223c29 aliguori
} MapClient;
3132 ba223c29 aliguori
3133 ba223c29 aliguori
static LIST_HEAD(map_client_list, MapClient) map_client_list
3134 ba223c29 aliguori
    = LIST_HEAD_INITIALIZER(map_client_list);
3135 ba223c29 aliguori
3136 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3137 ba223c29 aliguori
{
3138 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3139 ba223c29 aliguori
3140 ba223c29 aliguori
    client->opaque = opaque;
3141 ba223c29 aliguori
    client->callback = callback;
3142 ba223c29 aliguori
    LIST_INSERT_HEAD(&map_client_list, client, link);
3143 ba223c29 aliguori
    return client;
3144 ba223c29 aliguori
}
3145 ba223c29 aliguori
3146 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3147 ba223c29 aliguori
{
3148 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3149 ba223c29 aliguori
3150 ba223c29 aliguori
    LIST_REMOVE(client, link);
3151 ba223c29 aliguori
}
3152 ba223c29 aliguori
3153 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3154 ba223c29 aliguori
{
3155 ba223c29 aliguori
    MapClient *client;
3156 ba223c29 aliguori
3157 ba223c29 aliguori
    while (!LIST_EMPTY(&map_client_list)) {
3158 ba223c29 aliguori
        client = LIST_FIRST(&map_client_list);
3159 ba223c29 aliguori
        client->callback(client->opaque);
3160 ba223c29 aliguori
        LIST_REMOVE(client, link);
3161 ba223c29 aliguori
    }
3162 ba223c29 aliguori
}
3163 ba223c29 aliguori
3164 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3165 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3166 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3167 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3168 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
3169 ba223c29 aliguori
 * likely to succeed.
3170 6d16c2f8 aliguori
 */
3171 6d16c2f8 aliguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
3172 6d16c2f8 aliguori
                              target_phys_addr_t *plen,
3173 6d16c2f8 aliguori
                              int is_write)
3174 6d16c2f8 aliguori
{
3175 6d16c2f8 aliguori
    target_phys_addr_t len = *plen;
3176 6d16c2f8 aliguori
    target_phys_addr_t done = 0;
3177 6d16c2f8 aliguori
    int l;
3178 6d16c2f8 aliguori
    uint8_t *ret = NULL;
3179 6d16c2f8 aliguori
    uint8_t *ptr;
3180 6d16c2f8 aliguori
    target_phys_addr_t page;
3181 6d16c2f8 aliguori
    unsigned long pd;
3182 6d16c2f8 aliguori
    PhysPageDesc *p;
3183 6d16c2f8 aliguori
    unsigned long addr1;
3184 6d16c2f8 aliguori
3185 6d16c2f8 aliguori
    while (len > 0) {
3186 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
3187 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
3188 6d16c2f8 aliguori
        if (l > len)
3189 6d16c2f8 aliguori
            l = len;
3190 6d16c2f8 aliguori
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3191 6d16c2f8 aliguori
        if (!p) {
3192 6d16c2f8 aliguori
            pd = IO_MEM_UNASSIGNED;
3193 6d16c2f8 aliguori
        } else {
3194 6d16c2f8 aliguori
            pd = p->phys_offset;
3195 6d16c2f8 aliguori
        }
3196 6d16c2f8 aliguori
3197 6d16c2f8 aliguori
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3198 6d16c2f8 aliguori
            if (done || bounce.buffer) {
3199 6d16c2f8 aliguori
                break;
3200 6d16c2f8 aliguori
            }
3201 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3202 6d16c2f8 aliguori
            bounce.addr = addr;
3203 6d16c2f8 aliguori
            bounce.len = l;
3204 6d16c2f8 aliguori
            if (!is_write) {
3205 6d16c2f8 aliguori
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3206 6d16c2f8 aliguori
            }
3207 6d16c2f8 aliguori
            ptr = bounce.buffer;
3208 6d16c2f8 aliguori
        } else {
3209 6d16c2f8 aliguori
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3210 6d16c2f8 aliguori
            ptr = phys_ram_base + addr1;
3211 6d16c2f8 aliguori
        }
3212 6d16c2f8 aliguori
        if (!done) {
3213 6d16c2f8 aliguori
            ret = ptr;
3214 6d16c2f8 aliguori
        } else if (ret + done != ptr) {
3215 6d16c2f8 aliguori
            break;
3216 6d16c2f8 aliguori
        }
3217 6d16c2f8 aliguori
3218 6d16c2f8 aliguori
        len -= l;
3219 6d16c2f8 aliguori
        addr += l;
3220 6d16c2f8 aliguori
        done += l;
3221 6d16c2f8 aliguori
    }
3222 6d16c2f8 aliguori
    *plen = done;
3223 6d16c2f8 aliguori
    return ret;
3224 6d16c2f8 aliguori
}
3225 6d16c2f8 aliguori
3226 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3227 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3228 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
3229 6d16c2f8 aliguori
 */
3230 6d16c2f8 aliguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3231 6d16c2f8 aliguori
                               int is_write, target_phys_addr_t access_len)
3232 6d16c2f8 aliguori
{
3233 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
3234 6d16c2f8 aliguori
        if (is_write) {
3235 6d16c2f8 aliguori
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3236 6d16c2f8 aliguori
            while (access_len) {
3237 6d16c2f8 aliguori
                unsigned l;
3238 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
3239 6d16c2f8 aliguori
                if (l > access_len)
3240 6d16c2f8 aliguori
                    l = access_len;
3241 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
3242 6d16c2f8 aliguori
                    /* invalidate code */
3243 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3244 6d16c2f8 aliguori
                    /* set dirty bit */
3245 6d16c2f8 aliguori
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3246 6d16c2f8 aliguori
                        (0xff & ~CODE_DIRTY_FLAG);
3247 6d16c2f8 aliguori
                }
3248 6d16c2f8 aliguori
                addr1 += l;
3249 6d16c2f8 aliguori
                access_len -= l;
3250 6d16c2f8 aliguori
            }
3251 6d16c2f8 aliguori
        }
3252 6d16c2f8 aliguori
        return;
3253 6d16c2f8 aliguori
    }
3254 6d16c2f8 aliguori
    if (is_write) {
3255 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3256 6d16c2f8 aliguori
    }
3257 6d16c2f8 aliguori
    qemu_free(bounce.buffer);
3258 6d16c2f8 aliguori
    bounce.buffer = NULL;
3259 ba223c29 aliguori
    cpu_notify_map_clients();
3260 6d16c2f8 aliguori
}
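
/* Illustrative sketch (not part of exec.c): the zero-copy map/unmap pair as
   a device model might use it for DMA.  Error handling is reduced to a
   bail-out; a real caller could register a map client and retry later. */
#if 0
static void example_dma_zero(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;          /* may come back shorter */
    uint8_t *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        return;                 /* mapping resources exhausted, give up */
    }
    memset(host, 0, plen);      /* "DMA" into the mapped window */
    /* marks the memory dirty and releases any bounce buffer */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif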

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
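
/* Illustrative sketch (not part of the original file): the kind of caller
   the comment above has in mind.  A target MMU helper that wants to set an
   "accessed" flag in a 32-bit page table entry can rewrite the PTE with
   stl_phys_notdirty() so that the update neither marks the RAM page dirty
   nor invalidates translated code.  The PTE bit layout below is made up
   purely for the example. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);      /* current PTE contents */
    if (!(pte & 0x20)) {                    /* hypothetical ACCESSED bit */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}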

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
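
/* Illustrative sketch (not part of the original file): the unoptimized
   byte/word helpers above are convenient for one-off accesses from device
   models.  lduw_phys()/stw_phys() take and return values in host byte
   order and do the target-endian conversion (tswap16) internally.  The
   "status word" layout here is hypothetical. */
static void example_ack_status(target_phys_addr_t status_addr)
{
    uint32_t status = lduw_phys(status_addr);   /* 16-bit field, host order */
    stw_phys(status_addr, status | 0x0001);     /* hypothetical DONE bit */
}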

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
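
/* Illustrative sketch (not part of the original file): a debugger-style
   caller, loosely modelled on what a gdb-stub memory read does.  The
   function, buffer size and omitted pretty-printer are hypothetical; the
   point is that cpu_memory_rw_debug() takes guest *virtual* addresses and
   returns -1 if any page in the range is unmapped. */
static int example_debug_dump(CPUState *env, target_ulong vaddr, int len)
{
    uint8_t buf[64];

    if (len > (int)sizeof(buf))
        len = sizeof(buf);
    if (cpu_memory_rw_debug(env, vaddr, buf, len, 0) != 0)
        return -1;              /* some page in the range is not mapped */
    /* ... format and print buf[0..len-1] here ... */
    return len;
}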

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
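
/* Illustrative sketch (not part of the original file): dump_exec_info()
   only needs a FILE * and an fprintf-like callback, so the standard
   fprintf() can be passed directly, e.g. to dump the JIT statistics to
   stderr from some hypothetical debugging hook. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}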

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif