root / exec.c @ ea375f9a
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc **l1_phys_map;

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#endif

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

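/* Illustrative sketch (editor's example, not part of the original exec.c):
   how the loop in page_init() above derives qemu_host_page_bits and
   qemu_host_page_mask. The 4096-byte page size below is an assumed value
   and the function name is invented for this demo. */
static void __attribute__((unused)) example_host_page_bits(void)
{
    unsigned long size = 4096, bits = 0;   /* assumed host page size */

    while ((1ul << bits) < size)
        bits++;
    /* e.g. prints bits=12 and a mask whose low 12 bits are clear */
    printf("bits=%lu mask=0x%lx\n", bits, ~(size - 1));
}
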
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

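/* Illustrative sketch (editor's example, not part of the original exec.c):
   how a page index is split across the two lookup levels used by page_find()
   and phys_page_find_alloc() above. The widths below mirror an assumed
   L1_BITS = 10 / L2_BITS = 10 configuration; the function name is invented. */
static void __attribute__((unused)) example_two_level_split(void)
{
    const unsigned long l1_bits = 10, l2_bits = 10;   /* assumed table widths */
    unsigned long index = 0x12345;
    unsigned long l1 = (index >> l2_bits) & ((1ul << l1_bits) - 1); /* level-1 slot */
    unsigned long l2 = index & ((1ul << l2_bits) - 1);              /* level-2 entry */

    printf("index=0x%lx -> l1 slot %lu, l2 entry %lu\n", index, l1, l2);
}
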
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

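/* Illustrative sketch (editor's example, not part of the original exec.c):
   the call order implied by the comments above. "env0" stands for a
   hypothetical, already allocated CPUState; the real call sites live in the
   per-target cpu init and machine setup code. */
static void __attribute__((unused)) example_init_order(CPUState *env0)
{
    cpu_exec_init_all(0);   /* 0 = default translation buffer size */
    cpu_exec_init(env0);    /* assigns a cpu_index and links env0 into first_cpu */
}
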
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

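/* Illustrative sketch (editor's example, not part of the original exec.c):
   the pointer-tagging idiom used by the jmp_next/page_next lists above. A
   small index is packed into the two low bits of an aligned pointer and
   recovered with "& 3" / "& ~3". The ExampleNode type and the function name
   are invented for this demo. */
typedef struct ExampleNode { int payload; } ExampleNode;

static void __attribute__((unused)) example_tagged_pointer(void)
{
    static ExampleNode node;                          /* aligned to at least 4 */
    long tagged = (long)&node | 1;                    /* store tag value 1 */
    unsigned int tag = tagged & 3;                    /* recover the tag: 1 */
    ExampleNode *ptr = (ExampleNode *)(tagged & ~3);  /* recover the pointer */

    printf("tag=%u ptr_ok=%d\n", tag, ptr == &node);
}
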
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

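/* Illustrative sketch (editor's example, not part of the original exec.c):
   a worked call of set_bits() above. Marking bits [6, 6+4) of an all-zero
   bitmap touches the end of byte 0 and the start of byte 1. The function
   name is invented for this demo. */
static void __attribute__((unused)) example_set_bits(void)
{
    uint8_t tab[2] = { 0, 0 };

    set_bits(tab, 6, 4);                    /* set bits 6..9 */
    printf("%02x %02x\n", tab[0], tab[1]);  /* prints "c0 03" */
}
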
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

1005 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1006 c227f099 Anthony Liguori
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1007 9fa3e853 bellard
{
1008 9fa3e853 bellard
    PageDesc *p;
1009 9fa3e853 bellard
    int offset, b;
1010 59817ccb bellard
#if 0
1011 a4193c8a bellard
    if (1) {
1012 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1013 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1014 93fcfe39 aliguori
                  cpu_single_env->eip,
1015 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1016 59817ccb bellard
    }
1017 59817ccb bellard
#endif
1018 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1019 5fafdf24 ths
    if (!p)
1020 9fa3e853 bellard
        return;
1021 9fa3e853 bellard
    if (p->code_bitmap) {
1022 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1023 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1024 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1025 9fa3e853 bellard
            goto do_invalidate;
1026 9fa3e853 bellard
    } else {
1027 9fa3e853 bellard
    do_invalidate:
1028 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1029 9fa3e853 bellard
    }
1030 9fa3e853 bellard
}
1031 9fa3e853 bellard
1032 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1033 c227f099 Anthony Liguori
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1034 d720b93d bellard
                                    unsigned long pc, void *puc)
1035 9fa3e853 bellard
{
1036 6b917547 aliguori
    TranslationBlock *tb;
1037 9fa3e853 bellard
    PageDesc *p;
1038 6b917547 aliguori
    int n;
1039 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1040 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1041 d720b93d bellard
    CPUState *env = cpu_single_env;
1042 6b917547 aliguori
    int current_tb_modified = 0;
1043 6b917547 aliguori
    target_ulong current_pc = 0;
1044 6b917547 aliguori
    target_ulong current_cs_base = 0;
1045 6b917547 aliguori
    int current_flags = 0;
1046 d720b93d bellard
#endif
1047 9fa3e853 bellard
1048 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1049 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1050 5fafdf24 ths
    if (!p)
1051 9fa3e853 bellard
        return;
1052 9fa3e853 bellard
    tb = p->first_tb;
1053 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1054 d720b93d bellard
    if (tb && pc != 0) {
1055 d720b93d bellard
        current_tb = tb_find_pc(pc);
1056 d720b93d bellard
    }
1057 d720b93d bellard
#endif
1058 9fa3e853 bellard
    while (tb != NULL) {
1059 9fa3e853 bellard
        n = (long)tb & 3;
1060 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1061 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1062 d720b93d bellard
        if (current_tb == tb &&
1063 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1064 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1065 d720b93d bellard
                   its execution. We could be more precise by checking
1066 d720b93d bellard
                   that the modification is after the current PC, but it
1067 d720b93d bellard
                   would require a specialized function to partially
1068 d720b93d bellard
                   restore the CPU state */
1069 3b46e624 ths
1070 d720b93d bellard
            current_tb_modified = 1;
1071 d720b93d bellard
            cpu_restore_state(current_tb, env, pc, puc);
1072 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1073 6b917547 aliguori
                                 &current_flags);
1074 d720b93d bellard
        }
1075 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1076 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1077 9fa3e853 bellard
        tb = tb->page_next[n];
1078 9fa3e853 bellard
    }
1079 fd6ce8f6 bellard
    p->first_tb = NULL;
1080 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1081 d720b93d bellard
    if (current_tb_modified) {
1082 d720b93d bellard
        /* we generate a block containing just the instruction
1083 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1084 d720b93d bellard
           itself */
1085 ea1c1802 bellard
        env->current_tb = NULL;
1086 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1087 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1088 d720b93d bellard
    }
1089 d720b93d bellard
#endif
1090 fd6ce8f6 bellard
}
1091 9fa3e853 bellard
#endif
1092 fd6ce8f6 bellard
1093 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
1094 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1095 53a5960a pbrook
                                 unsigned int n, target_ulong page_addr)
1096 fd6ce8f6 bellard
{
1097 fd6ce8f6 bellard
    PageDesc *p;
1098 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1099 9fa3e853 bellard
1100 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1101 3a7d929e bellard
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1102 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1103 9fa3e853 bellard
    last_first_tb = p->first_tb;
1104 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
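    /* the TB is linked at the head of the page's list; the pointer is tagged
       with the page slot index n */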
1105 9fa3e853 bellard
    invalidate_page_bitmap(p);
1106 fd6ce8f6 bellard
1107 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1108 d720b93d bellard
1109 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1110 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1111 53a5960a pbrook
        target_ulong addr;
1112 53a5960a pbrook
        PageDesc *p2;
1113 9fa3e853 bellard
        int prot;
1114 9fa3e853 bellard
1115 fd6ce8f6 bellard
        /* force the host page to be non-writable (writes will have a
1116 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1117 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1118 fd6ce8f6 bellard
        prot = 0;
1119 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1120 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1121 53a5960a pbrook
1122 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1123 53a5960a pbrook
            if (!p2)
1124 53a5960a pbrook
                continue;
1125 53a5960a pbrook
            prot |= p2->flags;
1126 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1127 53a5960a pbrook
            page_get_flags(addr);
1128 53a5960a pbrook
          }
1129 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1130 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
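        /* the host page may span several target pages, so the union of their
           protections, minus PAGE_WRITE, is applied to the whole host page */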
1131 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1132 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1133 53a5960a pbrook
               page_addr);
1134 fd6ce8f6 bellard
#endif
1135 fd6ce8f6 bellard
    }
1136 9fa3e853 bellard
#else
1137 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1138 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1139 9fa3e853 bellard
       allocated in a physical page */
1140 9fa3e853 bellard
    if (!last_first_tb) {
1141 6a00d601 bellard
        tlb_protect_code(page_addr);
1142 9fa3e853 bellard
    }
1143 9fa3e853 bellard
#endif
1144 d720b93d bellard
1145 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1146 fd6ce8f6 bellard
}
1147 fd6ce8f6 bellard
1148 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1149 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1150 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1151 fd6ce8f6 bellard
{
1152 fd6ce8f6 bellard
    TranslationBlock *tb;
1153 fd6ce8f6 bellard
1154 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1155 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1156 d4e8164f bellard
        return NULL;
1157 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1158 fd6ce8f6 bellard
    tb->pc = pc;
1159 b448f2f3 bellard
    tb->cflags = 0;
1160 d4e8164f bellard
    return tb;
1161 d4e8164f bellard
}
1162 d4e8164f bellard
1163 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
1164 2e70f6ef pbrook
{
1165 bf20dc07 ths
    /* In practice this is mostly used for single-use temporary TBs.
1166 2e70f6ef pbrook
       Ignore the hard cases and just back up if this TB happens to
1167 2e70f6ef pbrook
       be the last one generated.  */
1168 2e70f6ef pbrook
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1169 2e70f6ef pbrook
        code_gen_ptr = tb->tc_ptr;
1170 2e70f6ef pbrook
        nb_tbs--;
1171 2e70f6ef pbrook
    }
1172 2e70f6ef pbrook
}
1173 2e70f6ef pbrook
1174 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1175 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1176 5fafdf24 ths
void tb_link_phys(TranslationBlock *tb,
1177 9fa3e853 bellard
                  target_ulong phys_pc, target_ulong phys_page2)
1178 d4e8164f bellard
{
1179 9fa3e853 bellard
    unsigned int h;
1180 9fa3e853 bellard
    TranslationBlock **ptb;
1181 9fa3e853 bellard
1182 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1183 c8a706fe pbrook
       before we are done.  */
1184 c8a706fe pbrook
    mmap_lock();
1185 9fa3e853 bellard
    /* add in the physical hash table */
1186 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1187 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1188 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1189 9fa3e853 bellard
    *ptb = tb;
1190 fd6ce8f6 bellard
1191 fd6ce8f6 bellard
    /* add in the page list */
1192 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1193 9fa3e853 bellard
    if (phys_page2 != -1)
1194 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1195 9fa3e853 bellard
    else
1196 9fa3e853 bellard
        tb->page_addr[1] = -1;
1197 9fa3e853 bellard
1198 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1199 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1200 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1201 d4e8164f bellard
1202 d4e8164f bellard
    /* init original jump addresses */
1203 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1204 d4e8164f bellard
        tb_reset_jump(tb, 0);
1205 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1206 d4e8164f bellard
        tb_reset_jump(tb, 1);
1207 8a40a180 bellard
1208 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1209 8a40a180 bellard
    tb_page_check();
1210 8a40a180 bellard
#endif
1211 c8a706fe pbrook
    mmap_unlock();
1212 fd6ce8f6 bellard
}
1213 fd6ce8f6 bellard
1214 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1215 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1216 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1217 fd6ce8f6 bellard
{
1218 9fa3e853 bellard
    int m_min, m_max, m;
1219 9fa3e853 bellard
    unsigned long v;
1220 9fa3e853 bellard
    TranslationBlock *tb;
1221 a513fe19 bellard
1222 a513fe19 bellard
    if (nb_tbs <= 0)
1223 a513fe19 bellard
        return NULL;
1224 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1225 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1226 a513fe19 bellard
        return NULL;
1227 a513fe19 bellard
    /* binary search (cf Knuth) */
1228 a513fe19 bellard
    m_min = 0;
1229 a513fe19 bellard
    m_max = nb_tbs - 1;
1230 a513fe19 bellard
    while (m_min <= m_max) {
1231 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1232 a513fe19 bellard
        tb = &tbs[m];
1233 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1234 a513fe19 bellard
        if (v == tc_ptr)
1235 a513fe19 bellard
            return tb;
1236 a513fe19 bellard
        else if (tc_ptr < v) {
1237 a513fe19 bellard
            m_max = m - 1;
1238 a513fe19 bellard
        } else {
1239 a513fe19 bellard
            m_min = m + 1;
1240 a513fe19 bellard
        }
1241 5fafdf24 ths
    }
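    /* no exact match: tbs[m_max] is the last block whose code starts below
       tc_ptr, so tc_ptr falls inside it */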
1242 a513fe19 bellard
    return &tbs[m_max];
1243 a513fe19 bellard
}
1244 7501267e bellard
1245 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1246 ea041c0e bellard
1247 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1248 ea041c0e bellard
{
1249 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1250 ea041c0e bellard
    unsigned int n1;
1251 ea041c0e bellard
1252 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1253 ea041c0e bellard
    if (tb1 != NULL) {
1254 ea041c0e bellard
        /* find head of list */
1255 ea041c0e bellard
        for(;;) {
1256 ea041c0e bellard
            n1 = (long)tb1 & 3;
1257 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1258 ea041c0e bellard
            if (n1 == 2)
1259 ea041c0e bellard
                break;
1260 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1261 ea041c0e bellard
        }
1262 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1263 ea041c0e bellard
        tb_next = tb1;
1264 ea041c0e bellard
1265 ea041c0e bellard
        /* remove tb from the jmp_first list */
1266 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1267 ea041c0e bellard
        for(;;) {
1268 ea041c0e bellard
            tb1 = *ptb;
1269 ea041c0e bellard
            n1 = (long)tb1 & 3;
1270 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1271 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1272 ea041c0e bellard
                break;
1273 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1274 ea041c0e bellard
        }
1275 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1276 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1277 3b46e624 ths
1278 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1279 ea041c0e bellard
        tb_reset_jump(tb, n);
1280 ea041c0e bellard
1281 0124311e bellard
        /* suppress jumps in the tb we could have jumped to */
1282 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1283 ea041c0e bellard
    }
1284 ea041c0e bellard
}
1285 ea041c0e bellard
1286 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1287 ea041c0e bellard
{
1288 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1289 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1290 ea041c0e bellard
}
1291 ea041c0e bellard
1292 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1293 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1294 94df27fd Paul Brook
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1295 94df27fd Paul Brook
{
1296 94df27fd Paul Brook
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1297 94df27fd Paul Brook
}
1298 94df27fd Paul Brook
#else
1299 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1300 d720b93d bellard
{
1301 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1302 9b3c35e0 j_mayer
    target_ulong pd;
1303 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1304 c2f07f81 pbrook
    PhysPageDesc *p;
1305 d720b93d bellard
1306 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1307 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1308 c2f07f81 pbrook
    if (!p) {
1309 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1310 c2f07f81 pbrook
    } else {
1311 c2f07f81 pbrook
        pd = p->phys_offset;
1312 c2f07f81 pbrook
    }
1313 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1314 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1315 d720b93d bellard
}
1316 c27004ec bellard
#endif
1317 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1318 d720b93d bellard
1319 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
1320 c527ee8f Paul Brook
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1321 c527ee8f Paul Brook
1322 c527ee8f Paul Brook
{
1323 c527ee8f Paul Brook
}
1324 c527ee8f Paul Brook
1325 c527ee8f Paul Brook
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1326 c527ee8f Paul Brook
                          int flags, CPUWatchpoint **watchpoint)
1327 c527ee8f Paul Brook
{
1328 c527ee8f Paul Brook
    return -ENOSYS;
1329 c527ee8f Paul Brook
}
1330 c527ee8f Paul Brook
#else
1331 6658ffb8 pbrook
/* Add a watchpoint.  */
1332 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1333 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1334 6658ffb8 pbrook
{
1335 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
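    /* for the power-of-two lengths accepted below, ~(len - 1) clears the low
       bits so that any address within the watched range compares equal */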
1336 c0ce998e aliguori
    CPUWatchpoint *wp;
1337 6658ffb8 pbrook
1338 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1339 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1340 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1341 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1342 b4051334 aliguori
        return -EINVAL;
1343 b4051334 aliguori
    }
1344 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1345 a1d1bb31 aliguori
1346 a1d1bb31 aliguori
    wp->vaddr = addr;
1347 b4051334 aliguori
    wp->len_mask = len_mask;
1348 a1d1bb31 aliguori
    wp->flags = flags;
1349 a1d1bb31 aliguori
1350 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1351 c0ce998e aliguori
    if (flags & BP_GDB)
1352 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1353 c0ce998e aliguori
    else
1354 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1355 6658ffb8 pbrook
1356 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1357 a1d1bb31 aliguori
1358 a1d1bb31 aliguori
    if (watchpoint)
1359 a1d1bb31 aliguori
        *watchpoint = wp;
1360 a1d1bb31 aliguori
    return 0;
1361 6658ffb8 pbrook
}
1362 6658ffb8 pbrook
1363 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1364 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1365 a1d1bb31 aliguori
                          int flags)
1366 6658ffb8 pbrook
{
1367 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1368 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1369 6658ffb8 pbrook
1370 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1371 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1372 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1373 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1374 6658ffb8 pbrook
            return 0;
1375 6658ffb8 pbrook
        }
1376 6658ffb8 pbrook
    }
1377 a1d1bb31 aliguori
    return -ENOENT;
1378 6658ffb8 pbrook
}
1379 6658ffb8 pbrook
1380 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1381 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1382 a1d1bb31 aliguori
{
1383 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1384 7d03f82f edgar_igl
1385 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1386 a1d1bb31 aliguori
1387 a1d1bb31 aliguori
    qemu_free(watchpoint);
1388 a1d1bb31 aliguori
}
1389 a1d1bb31 aliguori
1390 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1391 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1392 a1d1bb31 aliguori
{
1393 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1394 a1d1bb31 aliguori
1395 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1396 a1d1bb31 aliguori
        if (wp->flags & mask)
1397 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1398 c0ce998e aliguori
    }
1399 7d03f82f edgar_igl
}
1400 c527ee8f Paul Brook
#endif
1401 7d03f82f edgar_igl
1402 a1d1bb31 aliguori
/* Add a breakpoint.  */
1403 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1404 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1405 4c3a88a2 bellard
{
1406 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1407 c0ce998e aliguori
    CPUBreakpoint *bp;
1408 3b46e624 ths
1409 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1410 4c3a88a2 bellard
1411 a1d1bb31 aliguori
    bp->pc = pc;
1412 a1d1bb31 aliguori
    bp->flags = flags;
1413 a1d1bb31 aliguori
1414 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1415 c0ce998e aliguori
    if (flags & BP_GDB)
1416 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1417 c0ce998e aliguori
    else
1418 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1419 3b46e624 ths
1420 d720b93d bellard
    breakpoint_invalidate(env, pc);
1421 a1d1bb31 aliguori
1422 a1d1bb31 aliguori
    if (breakpoint)
1423 a1d1bb31 aliguori
        *breakpoint = bp;
1424 4c3a88a2 bellard
    return 0;
1425 4c3a88a2 bellard
#else
1426 a1d1bb31 aliguori
    return -ENOSYS;
1427 4c3a88a2 bellard
#endif
1428 4c3a88a2 bellard
}
1429 4c3a88a2 bellard
1430 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1431 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1432 a1d1bb31 aliguori
{
1433 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1434 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1435 a1d1bb31 aliguori
1436 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1437 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1438 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1439 a1d1bb31 aliguori
            return 0;
1440 a1d1bb31 aliguori
        }
1441 7d03f82f edgar_igl
    }
1442 a1d1bb31 aliguori
    return -ENOENT;
1443 a1d1bb31 aliguori
#else
1444 a1d1bb31 aliguori
    return -ENOSYS;
1445 7d03f82f edgar_igl
#endif
1446 7d03f82f edgar_igl
}
1447 7d03f82f edgar_igl
1448 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1449 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1450 4c3a88a2 bellard
{
1451 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1452 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1453 d720b93d bellard
1454 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1455 a1d1bb31 aliguori
1456 a1d1bb31 aliguori
    qemu_free(breakpoint);
1457 a1d1bb31 aliguori
#endif
1458 a1d1bb31 aliguori
}
1459 a1d1bb31 aliguori
1460 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1461 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1462 a1d1bb31 aliguori
{
1463 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1464 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1465 a1d1bb31 aliguori
1466 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1467 a1d1bb31 aliguori
        if (bp->flags & mask)
1468 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1469 c0ce998e aliguori
    }
1470 4c3a88a2 bellard
#endif
1471 4c3a88a2 bellard
}
1472 4c3a88a2 bellard
1473 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1474 c33a346e bellard
   CPU loop after each instruction */
1475 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1476 c33a346e bellard
{
1477 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1478 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1479 c33a346e bellard
        env->singlestep_enabled = enabled;
1480 e22a25c9 aliguori
        if (kvm_enabled())
1481 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1482 e22a25c9 aliguori
        else {
1483 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1484 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1485 e22a25c9 aliguori
            tb_flush(env);
1486 e22a25c9 aliguori
        }
1487 c33a346e bellard
    }
1488 c33a346e bellard
#endif
1489 c33a346e bellard
}
1490 c33a346e bellard
1491 34865134 bellard
/* enable or disable low levels log */
1492 34865134 bellard
void cpu_set_log(int log_flags)
1493 34865134 bellard
{
1494 34865134 bellard
    loglevel = log_flags;
1495 34865134 bellard
    if (loglevel && !logfile) {
1496 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1497 34865134 bellard
        if (!logfile) {
1498 34865134 bellard
            perror(logfilename);
1499 34865134 bellard
            _exit(1);
1500 34865134 bellard
        }
1501 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1502 9fa3e853 bellard
        /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1503 9fa3e853 bellard
        {
1504 b55266b5 blueswir1
            static char logfile_buf[4096];
1505 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1506 9fa3e853 bellard
        }
1507 bf65f53f Filip Navara
#elif !defined(_WIN32)
1508 bf65f53f Filip Navara
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1509 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1510 9fa3e853 bellard
#endif
1511 e735b91c pbrook
        log_append = 1;
1512 e735b91c pbrook
    }
1513 e735b91c pbrook
    if (!loglevel && logfile) {
1514 e735b91c pbrook
        fclose(logfile);
1515 e735b91c pbrook
        logfile = NULL;
1516 34865134 bellard
    }
1517 34865134 bellard
}
1518 34865134 bellard
1519 34865134 bellard
void cpu_set_log_filename(const char *filename)
1520 34865134 bellard
{
1521 34865134 bellard
    logfilename = strdup(filename);
1522 e735b91c pbrook
    if (logfile) {
1523 e735b91c pbrook
        fclose(logfile);
1524 e735b91c pbrook
        logfile = NULL;
1525 e735b91c pbrook
    }
1526 e735b91c pbrook
    cpu_set_log(loglevel);
1527 34865134 bellard
}
1528 c33a346e bellard
1529 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1530 ea041c0e bellard
{
1531 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1532 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1533 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds.  Often
1534 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
1535 ea041c0e bellard
    TranslationBlock *tb;
1536 c227f099 Anthony Liguori
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1537 59817ccb bellard
1538 cab1b4bd Riku Voipio
    spin_lock(&interrupt_lock);
1539 3098dba0 aurel32
    tb = env->current_tb;
1540 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1541 3098dba0 aurel32
       all the potentially executing TB */
1542 f76cfe56 Riku Voipio
    if (tb) {
1543 3098dba0 aurel32
        env->current_tb = NULL;
1544 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1545 be214e6c aurel32
    }
1546 cab1b4bd Riku Voipio
    spin_unlock(&interrupt_lock);
1547 3098dba0 aurel32
}
1548 3098dba0 aurel32
1549 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1550 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1551 3098dba0 aurel32
{
1552 3098dba0 aurel32
    int old_mask;
1553 be214e6c aurel32
1554 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1555 68a79315 bellard
    env->interrupt_request |= mask;
1556 3098dba0 aurel32
1557 8edac960 aliguori
#ifndef CONFIG_USER_ONLY
1558 8edac960 aliguori
    /*
1559 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1560 8edac960 aliguori
     * case it's halted.
1561 8edac960 aliguori
     */
1562 8edac960 aliguori
    if (!qemu_cpu_self(env)) {
1563 8edac960 aliguori
        qemu_cpu_kick(env);
1564 8edac960 aliguori
        return;
1565 8edac960 aliguori
    }
1566 8edac960 aliguori
#endif
1567 8edac960 aliguori
1568 2e70f6ef pbrook
    if (use_icount) {
1569 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
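        /* force the 32-bit icount counter negative so that generated code
           stops at its next check */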
1570 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1571 2e70f6ef pbrook
        if (!can_do_io(env)
1572 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1573 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1574 2e70f6ef pbrook
        }
1575 2e70f6ef pbrook
#endif
1576 2e70f6ef pbrook
    } else {
1577 3098dba0 aurel32
        cpu_unlink_tb(env);
1578 ea041c0e bellard
    }
1579 ea041c0e bellard
}
1580 ea041c0e bellard
1581 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1582 b54ad049 bellard
{
1583 b54ad049 bellard
    env->interrupt_request &= ~mask;
1584 b54ad049 bellard
}
1585 b54ad049 bellard
1586 3098dba0 aurel32
void cpu_exit(CPUState *env)
1587 3098dba0 aurel32
{
1588 3098dba0 aurel32
    env->exit_request = 1;
1589 3098dba0 aurel32
    cpu_unlink_tb(env);
1590 3098dba0 aurel32
}
1591 3098dba0 aurel32
1592 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1593 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1594 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1595 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1596 f193c797 bellard
      "show target assembly code for each compiled TB" },
1597 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1598 57fec1fe bellard
      "show micro ops for each compiled TB" },
1599 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1600 e01a1157 blueswir1
      "show micro ops "
1601 e01a1157 blueswir1
#ifdef TARGET_I386
1602 e01a1157 blueswir1
      "before eflags optimization and "
1603 f193c797 bellard
#endif
1604 e01a1157 blueswir1
      "after liveness analysis" },
1605 f193c797 bellard
    { CPU_LOG_INT, "int",
1606 f193c797 bellard
      "show interrupts/exceptions in short format" },
1607 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1608 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1609 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1610 e91c8a77 ths
      "show CPU state before block translation" },
1611 f193c797 bellard
#ifdef TARGET_I386
1612 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1613 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1614 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1615 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1616 f193c797 bellard
#endif
1617 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1618 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1619 fd872598 bellard
      "show all i/o ports accesses" },
1620 8e3a9fd2 bellard
#endif
1621 f193c797 bellard
    { 0, NULL, NULL },
1622 f193c797 bellard
};
1623 f193c797 bellard
1624 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1625 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1626 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1627 f6f3fbca Michael S. Tsirkin
1628 f6f3fbca Michael S. Tsirkin
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1629 f6f3fbca Michael S. Tsirkin
                                  ram_addr_t size,
1630 f6f3fbca Michael S. Tsirkin
                                  ram_addr_t phys_offset)
1631 f6f3fbca Michael S. Tsirkin
{
1632 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1633 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1634 f6f3fbca Michael S. Tsirkin
        client->set_memory(client, start_addr, size, phys_offset);
1635 f6f3fbca Michael S. Tsirkin
    }
1636 f6f3fbca Michael S. Tsirkin
}
1637 f6f3fbca Michael S. Tsirkin
1638 f6f3fbca Michael S. Tsirkin
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1639 f6f3fbca Michael S. Tsirkin
                                        target_phys_addr_t end)
1640 f6f3fbca Michael S. Tsirkin
{
1641 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1642 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1643 f6f3fbca Michael S. Tsirkin
        int r = client->sync_dirty_bitmap(client, start, end);
1644 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1645 f6f3fbca Michael S. Tsirkin
            return r;
1646 f6f3fbca Michael S. Tsirkin
    }
1647 f6f3fbca Michael S. Tsirkin
    return 0;
1648 f6f3fbca Michael S. Tsirkin
}
1649 f6f3fbca Michael S. Tsirkin
1650 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1651 f6f3fbca Michael S. Tsirkin
{
1652 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1653 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1654 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1655 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1656 f6f3fbca Michael S. Tsirkin
            return r;
1657 f6f3fbca Michael S. Tsirkin
    }
1658 f6f3fbca Michael S. Tsirkin
    return 0;
1659 f6f3fbca Michael S. Tsirkin
}
1660 f6f3fbca Michael S. Tsirkin
1661 f6f3fbca Michael S. Tsirkin
static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
1662 f6f3fbca Michael S. Tsirkin
                                         CPUPhysMemoryClient *client)
1663 f6f3fbca Michael S. Tsirkin
{
1664 f6f3fbca Michael S. Tsirkin
    PhysPageDesc *pd;
1665 f6f3fbca Michael S. Tsirkin
    int l1, l2;
1666 f6f3fbca Michael S. Tsirkin
1667 f6f3fbca Michael S. Tsirkin
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
1668 f6f3fbca Michael S. Tsirkin
        pd = phys_map[l1];
1669 f6f3fbca Michael S. Tsirkin
        if (!pd) {
1670 f6f3fbca Michael S. Tsirkin
            continue;
1671 f6f3fbca Michael S. Tsirkin
        }
1672 f6f3fbca Michael S. Tsirkin
        for (l2 = 0; l2 < L2_SIZE; ++l2) {
1673 f6f3fbca Michael S. Tsirkin
            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
1674 f6f3fbca Michael S. Tsirkin
                continue;
1675 f6f3fbca Michael S. Tsirkin
            }
1676 f6f3fbca Michael S. Tsirkin
            client->set_memory(client, pd[l2].region_offset,
1677 f6f3fbca Michael S. Tsirkin
                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
1678 f6f3fbca Michael S. Tsirkin
        }
1679 f6f3fbca Michael S. Tsirkin
    }
1680 f6f3fbca Michael S. Tsirkin
}
1681 f6f3fbca Michael S. Tsirkin
1682 f6f3fbca Michael S. Tsirkin
static void phys_page_for_each(CPUPhysMemoryClient *client)
1683 f6f3fbca Michael S. Tsirkin
{
1684 f6f3fbca Michael S. Tsirkin
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
1685 f6f3fbca Michael S. Tsirkin
1686 f6f3fbca Michael S. Tsirkin
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
1687 f6f3fbca Michael S. Tsirkin
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
1688 f6f3fbca Michael S. Tsirkin
#endif
1689 f6f3fbca Michael S. Tsirkin
    void **phys_map = (void **)l1_phys_map;
1690 f6f3fbca Michael S. Tsirkin
    int l1;
1691 f6f3fbca Michael S. Tsirkin
    if (!l1_phys_map) {
1692 f6f3fbca Michael S. Tsirkin
        return;
1693 f6f3fbca Michael S. Tsirkin
    }
1694 f6f3fbca Michael S. Tsirkin
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
1695 f6f3fbca Michael S. Tsirkin
        if (phys_map[l1]) {
1696 f6f3fbca Michael S. Tsirkin
            phys_page_for_each_in_l1_map(phys_map[l1], client);
1697 f6f3fbca Michael S. Tsirkin
        }
1698 f6f3fbca Michael S. Tsirkin
    }
1699 f6f3fbca Michael S. Tsirkin
#else
1700 f6f3fbca Michael S. Tsirkin
    if (!l1_phys_map) {
1701 f6f3fbca Michael S. Tsirkin
        return;
1702 f6f3fbca Michael S. Tsirkin
    }
1703 f6f3fbca Michael S. Tsirkin
    phys_page_for_each_in_l1_map(l1_phys_map, client);
1704 f6f3fbca Michael S. Tsirkin
#endif
1705 f6f3fbca Michael S. Tsirkin
}
1706 f6f3fbca Michael S. Tsirkin
1707 f6f3fbca Michael S. Tsirkin
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1708 f6f3fbca Michael S. Tsirkin
{
1709 f6f3fbca Michael S. Tsirkin
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
1710 f6f3fbca Michael S. Tsirkin
    phys_page_for_each(client);
1711 f6f3fbca Michael S. Tsirkin
}
1712 f6f3fbca Michael S. Tsirkin
1713 f6f3fbca Michael S. Tsirkin
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1714 f6f3fbca Michael S. Tsirkin
{
1715 f6f3fbca Michael S. Tsirkin
    QLIST_REMOVE(client, list);
1716 f6f3fbca Michael S. Tsirkin
}
1717 f6f3fbca Michael S. Tsirkin
#endif
1718 f6f3fbca Michael S. Tsirkin
1719 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1720 f193c797 bellard
{
1721 f193c797 bellard
    if (strlen(s2) != n)
1722 f193c797 bellard
        return 0;
1723 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1724 f193c797 bellard
}
1725 3b46e624 ths
1726 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
1727 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1728 f193c797 bellard
{
1729 c7cd6a37 blueswir1
    const CPULogItem *item;
1730 f193c797 bellard
    int mask;
1731 f193c797 bellard
    const char *p, *p1;
1732 f193c797 bellard
1733 f193c797 bellard
    p = str;
1734 f193c797 bellard
    mask = 0;
1735 f193c797 bellard
    for(;;) {
1736 f193c797 bellard
        p1 = strchr(p, ',');
1737 f193c797 bellard
        if (!p1)
1738 f193c797 bellard
            p1 = p + strlen(p);
1739 8e3a9fd2 bellard
        if(cmp1(p,p1-p,"all")) {
1740 8e3a9fd2 bellard
                for(item = cpu_log_items; item->mask != 0; item++) {
1741 8e3a9fd2 bellard
                        mask |= item->mask;
1742 8e3a9fd2 bellard
                }
1743 8e3a9fd2 bellard
        } else {
1744 f193c797 bellard
        for(item = cpu_log_items; item->mask != 0; item++) {
1745 f193c797 bellard
            if (cmp1(p, p1 - p, item->name))
1746 f193c797 bellard
                goto found;
1747 f193c797 bellard
        }
1748 f193c797 bellard
        return 0;
1749 8e3a9fd2 bellard
        }
1750 f193c797 bellard
    found:
1751 f193c797 bellard
        mask |= item->mask;
1752 f193c797 bellard
        if (*p1 != ',')
1753 f193c797 bellard
            break;
1754 f193c797 bellard
        p = p1 + 1;
1755 f193c797 bellard
    }
1756 f193c797 bellard
    return mask;
1757 f193c797 bellard
}
1758 ea041c0e bellard
1759 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1760 7501267e bellard
{
1761 7501267e bellard
    va_list ap;
1762 493ae1f0 pbrook
    va_list ap2;
1763 7501267e bellard
1764 7501267e bellard
    va_start(ap, fmt);
1765 493ae1f0 pbrook
    va_copy(ap2, ap);
1766 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1767 7501267e bellard
    vfprintf(stderr, fmt, ap);
1768 7501267e bellard
    fprintf(stderr, "\n");
1769 7501267e bellard
#ifdef TARGET_I386
1770 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1771 7fe48483 bellard
#else
1772 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1773 7501267e bellard
#endif
1774 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1775 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1776 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1777 93fcfe39 aliguori
        qemu_log("\n");
1778 f9373291 j_mayer
#ifdef TARGET_I386
1779 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1780 f9373291 j_mayer
#else
1781 93fcfe39 aliguori
        log_cpu_state(env, 0);
1782 f9373291 j_mayer
#endif
1783 31b1a7b4 aliguori
        qemu_log_flush();
1784 93fcfe39 aliguori
        qemu_log_close();
1785 924edcae balrog
    }
1786 493ae1f0 pbrook
    va_end(ap2);
1787 f9373291 j_mayer
    va_end(ap);
1788 fd052bf6 Riku Voipio
#if defined(CONFIG_USER_ONLY)
1789 fd052bf6 Riku Voipio
    {
1790 fd052bf6 Riku Voipio
        struct sigaction act;
1791 fd052bf6 Riku Voipio
        sigfillset(&act.sa_mask);
1792 fd052bf6 Riku Voipio
        act.sa_handler = SIG_DFL;
1793 fd052bf6 Riku Voipio
        sigaction(SIGABRT, &act, NULL);
1794 fd052bf6 Riku Voipio
    }
1795 fd052bf6 Riku Voipio
#endif
1796 7501267e bellard
    abort();
1797 7501267e bellard
}
1798 7501267e bellard
1799 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1800 c5be9f08 ths
{
1801 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1802 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1803 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1804 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1805 5a38f081 aliguori
    CPUBreakpoint *bp;
1806 5a38f081 aliguori
    CPUWatchpoint *wp;
1807 5a38f081 aliguori
#endif
1808 5a38f081 aliguori
1809 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1810 5a38f081 aliguori
1811 5a38f081 aliguori
    /* Preserve chaining and index. */
1812 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1813 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1814 5a38f081 aliguori
1815 5a38f081 aliguori
    /* Clone all break/watchpoints.
1816 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1817 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1818 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->breakpoints);
1819 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->watchpoints);
1820 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1821 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1822 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1823 5a38f081 aliguori
    }
1824 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1825 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1826 5a38f081 aliguori
                              wp->flags, NULL);
1827 5a38f081 aliguori
    }
1828 5a38f081 aliguori
#endif
1829 5a38f081 aliguori
1830 c5be9f08 ths
    return new_env;
1831 c5be9f08 ths
}
1832 c5be9f08 ths
1833 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1834 0124311e bellard
1835 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1836 5c751e99 edgar_igl
{
1837 5c751e99 edgar_igl
    unsigned int i;
1838 5c751e99 edgar_igl
1839 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1840 5c751e99 edgar_igl
       overlap the flushed page.  */
1841 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1842 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1843 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1844 5c751e99 edgar_igl
1845 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1846 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1847 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1848 5c751e99 edgar_igl
}
1849 5c751e99 edgar_igl
1850 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1851 08738984 Igor Kovalenko
    .addr_read  = -1,
1852 08738984 Igor Kovalenko
    .addr_write = -1,
1853 08738984 Igor Kovalenko
    .addr_code  = -1,
1854 08738984 Igor Kovalenko
    .addend     = -1,
1855 08738984 Igor Kovalenko
};
1856 08738984 Igor Kovalenko
1857 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1858 ee8b7021 bellard
   implemented yet) */
1859 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1860 33417e70 bellard
{
1861 33417e70 bellard
    int i;
1862 0124311e bellard
1863 9fa3e853 bellard
#if defined(DEBUG_TLB)
1864 9fa3e853 bellard
    printf("tlb_flush:\n");
1865 9fa3e853 bellard
#endif
1866 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1867 0124311e bellard
       links while we are modifying them */
1868 0124311e bellard
    env->current_tb = NULL;
1869 0124311e bellard
1870 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1871 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1872 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1873 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1874 cfde4bd9 Isaku Yamahata
        }
1875 33417e70 bellard
    }
1876 9fa3e853 bellard
1877 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1878 9fa3e853 bellard
1879 e3db7226 bellard
    tlb_flush_count++;
1880 33417e70 bellard
}
1881 33417e70 bellard
1882 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1883 61382a50 bellard
{
1884 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1885 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1886 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1887 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1888 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1889 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1890 08738984 Igor Kovalenko
        *tlb_entry = s_cputlb_empty_entry;
1891 84b7b8e7 bellard
    }
1892 61382a50 bellard
}
1893 61382a50 bellard
1894 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
1895 33417e70 bellard
{
1896 8a40a180 bellard
    int i;
1897 cfde4bd9 Isaku Yamahata
    int mmu_idx;
1898 0124311e bellard
1899 9fa3e853 bellard
#if defined(DEBUG_TLB)
1900 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1901 9fa3e853 bellard
#endif
1902 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1903 0124311e bellard
       links while we are modifying them */
1904 0124311e bellard
    env->current_tb = NULL;
1905 61382a50 bellard
1906 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
1907 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1908 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1909 cfde4bd9 Isaku Yamahata
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1910 0124311e bellard
1911 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
1912 9fa3e853 bellard
}
1913 9fa3e853 bellard
1914 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1915 9fa3e853 bellard
   can be detected */
1916 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
1917 9fa3e853 bellard
{
1918 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1919 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1920 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1921 9fa3e853 bellard
}
1922 9fa3e853 bellard
1923 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1924 3a7d929e bellard
   tested for self modifying code */
1925 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1926 3a7d929e bellard
                                    target_ulong vaddr)
1927 9fa3e853 bellard
{
1928 3a7d929e bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1929 1ccde1cb bellard
}
1930 1ccde1cb bellard
1931 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1932 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
1933 1ccde1cb bellard
{
1934 1ccde1cb bellard
    unsigned long addr;
1935 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1936 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1937 1ccde1cb bellard
        if ((addr - start) < length) {
1938 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1939 1ccde1cb bellard
        }
1940 1ccde1cb bellard
    }
1941 1ccde1cb bellard
}
1942 1ccde1cb bellard
1943 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
1944 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1945 0a962c02 bellard
                                     int dirty_flags)
1946 1ccde1cb bellard
{
1947 1ccde1cb bellard
    CPUState *env;
1948 4f2ac237 bellard
    unsigned long length, start1;
1949 0a962c02 bellard
    int i, mask, len;
1950 0a962c02 bellard
    uint8_t *p;
1951 1ccde1cb bellard
1952 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
1953 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
1954 1ccde1cb bellard
1955 1ccde1cb bellard
    length = end - start;
1956 1ccde1cb bellard
    if (length == 0)
1957 1ccde1cb bellard
        return;
1958 0a962c02 bellard
    len = length >> TARGET_PAGE_BITS;
1959 f23db169 bellard
    mask = ~dirty_flags;
1960 f23db169 bellard
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1961 f23db169 bellard
    for(i = 0; i < len; i++)
1962 f23db169 bellard
        p[i] &= mask;
1963 f23db169 bellard
1964 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
1965 1ccde1cb bellard
       when accessing the range */
1966 5579c7f3 pbrook
    start1 = (unsigned long)qemu_get_ram_ptr(start);
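    /* from here on we work with host addresses: the TLB entries below are
       compared through their host-address addend */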
1967 5579c7f3 pbrook
    /* Check that we don't span multiple blocks - this breaks the
1968 5579c7f3 pbrook
       address comparisons below.  */
1969 5579c7f3 pbrook
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1970 5579c7f3 pbrook
            != (end - 1) - start) {
1971 5579c7f3 pbrook
        abort();
1972 5579c7f3 pbrook
    }
1973 5579c7f3 pbrook
1974 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1975 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1976 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1977 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
1978 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1979 cfde4bd9 Isaku Yamahata
                                      start1, length);
1980 cfde4bd9 Isaku Yamahata
        }
1981 6a00d601 bellard
    }
1982 1ccde1cb bellard
}
1983 1ccde1cb bellard
1984 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
1985 74576198 aliguori
{
1986 f6f3fbca Michael S. Tsirkin
    int ret = 0;
1987 74576198 aliguori
    in_migration = enable;
1988 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
1989 f6f3fbca Michael S. Tsirkin
    return ret;
1990 74576198 aliguori
}
1991 74576198 aliguori
1992 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
1993 74576198 aliguori
{
1994 74576198 aliguori
    return in_migration;
1995 74576198 aliguori
}
1996 74576198 aliguori
1997 c227f099 Anthony Liguori
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1998 c227f099 Anthony Liguori
                                   target_phys_addr_t end_addr)
1999 2bec46dc aliguori
{
2000 7b8f3b78 Michael S. Tsirkin
    int ret;
2001 151f7749 Jan Kiszka
2002 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2003 151f7749 Jan Kiszka
    return ret;
2004 2bec46dc aliguori
}
2005 2bec46dc aliguori
2006 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2007 3a7d929e bellard
{
2008 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
2009 5579c7f3 pbrook
    void *p;
2010 3a7d929e bellard
2011 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2012 5579c7f3 pbrook
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2013 5579c7f3 pbrook
            + tlb_entry->addend);
2014 5579c7f3 pbrook
        ram_addr = qemu_ram_addr_from_host(p);
2015 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
2016 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
2017 3a7d929e bellard
        }
2018 3a7d929e bellard
    }
2019 3a7d929e bellard
}
2020 3a7d929e bellard
2021 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2022 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2023 3a7d929e bellard
{
2024 3a7d929e bellard
    int i;
2025 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2026 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2027 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2028 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2029 cfde4bd9 Isaku Yamahata
    }
2030 3a7d929e bellard
}
2031 3a7d929e bellard
2032 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2033 1ccde1cb bellard
{
2034 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2035 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
2036 1ccde1cb bellard
}
2037 1ccde1cb bellard
2038 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2039 0f459d16 pbrook
   so that it is no longer dirty */
2040 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2041 1ccde1cb bellard
{
2042 1ccde1cb bellard
    int i;
2043 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2044 1ccde1cb bellard
2045 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2046 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2047 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2048 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2049 9fa3e853 bellard
}
2050 9fa3e853 bellard
2051 59817ccb bellard
/* add a new TLB entry. At most one entry for a given virtual address
2052 59817ccb bellard
   is permitted. Return 0 if OK or 2 if the page could not be mapped
2053 59817ccb bellard
   (can only happen in non SOFTMMU mode for I/O pages or pages
2054 59817ccb bellard
   conflicting with the host address space). */
2055 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2056 c227f099 Anthony Liguori
                      target_phys_addr_t paddr, int prot,
2057 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2058 9fa3e853 bellard
{
2059 92e873b9 bellard
    PhysPageDesc *p;
2060 4f2ac237 bellard
    unsigned long pd;
2061 9fa3e853 bellard
    unsigned int index;
2062 4f2ac237 bellard
    target_ulong address;
2063 0f459d16 pbrook
    target_ulong code_address;
2064 c227f099 Anthony Liguori
    target_phys_addr_t addend;
2065 9fa3e853 bellard
    int ret;
2066 84b7b8e7 bellard
    CPUTLBEntry *te;
2067 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2068 c227f099 Anthony Liguori
    target_phys_addr_t iotlb;
2069 9fa3e853 bellard
2070 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2071 9fa3e853 bellard
    if (!p) {
2072 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
2073 9fa3e853 bellard
    } else {
2074 9fa3e853 bellard
        pd = p->phys_offset;
2075 9fa3e853 bellard
    }
2076 9fa3e853 bellard
#if defined(DEBUG_TLB)
2077 6ebbf390 j_mayer
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2078 6ebbf390 j_mayer
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2079 9fa3e853 bellard
#endif
2080 9fa3e853 bellard
2081 9fa3e853 bellard
    ret = 0;
2082 0f459d16 pbrook
    address = vaddr;
2083 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2084 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2085 0f459d16 pbrook
        address |= TLB_MMIO;
2086 0f459d16 pbrook
    }
2087 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2088 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2089 0f459d16 pbrook
        /* Normal RAM.  */
2090 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2091 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2092 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
2093 0f459d16 pbrook
        else
2094 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
2095 0f459d16 pbrook
    } else {
2096 ccbb4d44 Stuart Brady
        /* IO handlers are currently passed a physical address.
2097 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2098 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2099 0f459d16 pbrook
           and avoid full address decoding in every device.
2100 0f459d16 pbrook
           We can't use the high bits of pd for this because
2101 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2102 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2103 8da3ff18 pbrook
        if (p) {
2104 8da3ff18 pbrook
            iotlb += p->region_offset;
2105 8da3ff18 pbrook
        } else {
2106 8da3ff18 pbrook
            iotlb += paddr;
2107 8da3ff18 pbrook
        }
2108 0f459d16 pbrook
    }
2109 0f459d16 pbrook
2110 0f459d16 pbrook
    code_address = address;
2111 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2112 0f459d16 pbrook
       watchpoint trap routines.  */
2113 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2114 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2115 0f459d16 pbrook
            iotlb = io_mem_watch + paddr;
2116 0f459d16 pbrook
            /* TODO: The memory case can be optimized by not trapping
2117 0f459d16 pbrook
               reads of pages with a write breakpoint.  */
2118 0f459d16 pbrook
            address |= TLB_MMIO;
2119 6658ffb8 pbrook
        }
2120 0f459d16 pbrook
    }
2121 d79acba4 balrog
2122 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2123 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2124 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2125 0f459d16 pbrook
    te->addend = addend - vaddr;
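    /* iotlb and addend are stored relative to vaddr, so the access paths can
       recover the physical or host address by adding the virtual address back */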
2126 0f459d16 pbrook
    if (prot & PAGE_READ) {
2127 0f459d16 pbrook
        te->addr_read = address;
2128 0f459d16 pbrook
    } else {
2129 0f459d16 pbrook
        te->addr_read = -1;
2130 0f459d16 pbrook
    }
2131 5c751e99 edgar_igl
2132 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2133 0f459d16 pbrook
        te->addr_code = code_address;
2134 0f459d16 pbrook
    } else {
2135 0f459d16 pbrook
        te->addr_code = -1;
2136 0f459d16 pbrook
    }
2137 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2138 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2139 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2140 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2141 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2142 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2143 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2144 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2145 9fa3e853 bellard
        } else {
2146 0f459d16 pbrook
            te->addr_write = address;
2147 9fa3e853 bellard
        }
2148 0f459d16 pbrook
    } else {
2149 0f459d16 pbrook
        te->addr_write = -1;
2150 9fa3e853 bellard
    }
2151 9fa3e853 bellard
    return ret;
2152 9fa3e853 bellard
}
2153 9fa3e853 bellard
2154 0124311e bellard
#else
2155 0124311e bellard
2156 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2157 0124311e bellard
{
2158 0124311e bellard
}
2159 0124311e bellard
2160 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2161 0124311e bellard
{
2162 0124311e bellard
}
2163 0124311e bellard
2164 edf8e2af Mika Westerberg
/*
2165 edf8e2af Mika Westerberg
 * Walks guest process memory "regions" one by one
2166 edf8e2af Mika Westerberg
 * and calls callback function 'fn' for each region.
2167 edf8e2af Mika Westerberg
 */
2168 edf8e2af Mika Westerberg
int walk_memory_regions(void *priv,
2169 edf8e2af Mika Westerberg
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2170 33417e70 bellard
{
2171 9fa3e853 bellard
    unsigned long start, end;
2172 edf8e2af Mika Westerberg
    PageDesc *p = NULL;
2173 9fa3e853 bellard
    int i, j, prot, prot1;
2174 edf8e2af Mika Westerberg
    int rc = 0;
2175 33417e70 bellard
2176 edf8e2af Mika Westerberg
    start = end = -1;
2177 9fa3e853 bellard
    prot = 0;
2178 edf8e2af Mika Westerberg
2179 edf8e2af Mika Westerberg
    for (i = 0; i <= L1_SIZE; i++) {
2180 edf8e2af Mika Westerberg
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2181 edf8e2af Mika Westerberg
        for (j = 0; j < L2_SIZE; j++) {
2182 edf8e2af Mika Westerberg
            prot1 = (p == NULL) ? 0 : p[j].flags;
2183 edf8e2af Mika Westerberg
            /*
2184 edf8e2af Mika Westerberg
             * "region" is one continuous chunk of memory
2185 edf8e2af Mika Westerberg
             * that has the same protection flags set.
2186 edf8e2af Mika Westerberg
             */
2187 9fa3e853 bellard
            if (prot1 != prot) {
2188 9fa3e853 bellard
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
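                /* reconstruct the guest address of this page from its L1 and
                   L2 table indices */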
2189 9fa3e853 bellard
                if (start != -1) {
2190 edf8e2af Mika Westerberg
                    rc = (*fn)(priv, start, end, prot);
2191 edf8e2af Mika Westerberg
                    /* callback can stop iteration by returning != 0 */
2192 edf8e2af Mika Westerberg
                    if (rc != 0)
2193 edf8e2af Mika Westerberg
                        return (rc);
2194 9fa3e853 bellard
                }
2195 9fa3e853 bellard
                if (prot1 != 0)
2196 9fa3e853 bellard
                    start = end;
2197 9fa3e853 bellard
                else
2198 9fa3e853 bellard
                    start = -1;
2199 9fa3e853 bellard
                prot = prot1;
2200 9fa3e853 bellard
            }
2201 edf8e2af Mika Westerberg
            if (p == NULL)
2202 9fa3e853 bellard
                break;
2203 9fa3e853 bellard
        }
2204 33417e70 bellard
    }
2205 edf8e2af Mika Westerberg
    return (rc);
2206 edf8e2af Mika Westerberg
}
2207 edf8e2af Mika Westerberg
2208 edf8e2af Mika Westerberg
static int dump_region(void *priv, unsigned long start,
2209 edf8e2af Mika Westerberg
    unsigned long end, unsigned long prot)
2210 edf8e2af Mika Westerberg
{
2211 edf8e2af Mika Westerberg
    FILE *f = (FILE *)priv;
2212 edf8e2af Mika Westerberg
2213 edf8e2af Mika Westerberg
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2214 edf8e2af Mika Westerberg
        start, end, end - start,
2215 edf8e2af Mika Westerberg
        ((prot & PAGE_READ) ? 'r' : '-'),
2216 edf8e2af Mika Westerberg
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2217 edf8e2af Mika Westerberg
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2218 edf8e2af Mika Westerberg
2219 edf8e2af Mika Westerberg
    return (0);
2220 edf8e2af Mika Westerberg
}
2221 edf8e2af Mika Westerberg
2222 edf8e2af Mika Westerberg
/* dump memory mappings */
2223 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2224 edf8e2af Mika Westerberg
{
2225 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2226 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2227 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2228 33417e70 bellard
}
2229 33417e70 bellard
2230 53a5960a pbrook
int page_get_flags(target_ulong address)
2231 33417e70 bellard
{
2232 9fa3e853 bellard
    PageDesc *p;
2233 9fa3e853 bellard
2234 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2235 33417e70 bellard
    if (!p)
2236 9fa3e853 bellard
        return 0;
2237 9fa3e853 bellard
    return p->flags;
2238 9fa3e853 bellard
}
2239 9fa3e853 bellard
2240 9fa3e853 bellard
/* modify the flags of a page and invalidate the code if
2241 ccbb4d44 Stuart Brady
   necessary. The flag PAGE_WRITE_ORG is set automatically
2242 9fa3e853 bellard
   depending on PAGE_WRITE */
2243 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2244 9fa3e853 bellard
{
2245 9fa3e853 bellard
    PageDesc *p;
2246 53a5960a pbrook
    target_ulong addr;
2247 9fa3e853 bellard
2248 c8a706fe pbrook
    /* mmap_lock should already be held.  */
2249 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2250 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2251 9fa3e853 bellard
    if (flags & PAGE_WRITE)
2252 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2253 9fa3e853 bellard
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2254 9fa3e853 bellard
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2255 17e2377a pbrook
        /* We may be called for host regions that are outside guest
2256 17e2377a pbrook
           address space.  */
2257 17e2377a pbrook
        if (!p)
2258 17e2377a pbrook
            return;
2259 9fa3e853 bellard
        /* if the write protection is set, then we invalidate the code
2260 9fa3e853 bellard
           inside */
2261 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2262 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2263 9fa3e853 bellard
            p->first_tb) {
2264 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2265 9fa3e853 bellard
        }
2266 9fa3e853 bellard
        p->flags = flags;
2267 9fa3e853 bellard
    }
2268 33417e70 bellard
}
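
/* Usage sketch (illustrative; start/len come from a hypothetical caller in
 * the usermode mmap path, which already holds mmap_lock as required):
 *
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_READ | PAGE_WRITE);
 *     ...
 *     int prot = page_get_flags(start);
 *
 * Because PAGE_WRITE is given, PAGE_WRITE_ORG is added automatically, and
 * pages that were previously non-writable and contain translated code have
 * that code invalidated. */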
2269 33417e70 bellard
2270 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2271 3d97b40b ths
{
2272 3d97b40b ths
    PageDesc *p;
2273 3d97b40b ths
    target_ulong end;
2274 3d97b40b ths
    target_ulong addr;
2275 3d97b40b ths
2276 55f280c9 balrog
    if (start + len < start)
2277 55f280c9 balrog
        /* we've wrapped around */
2278 55f280c9 balrog
        return -1;
2279 55f280c9 balrog
2280 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2281 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2282 3d97b40b ths
2283 3d97b40b ths
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2284 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2285 3d97b40b ths
        if( !p )
2286 3d97b40b ths
            return -1;
2287 3d97b40b ths
        if( !(p->flags & PAGE_VALID) )
2288 3d97b40b ths
            return -1;
2289 3d97b40b ths
2290 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2291 3d97b40b ths
            return -1;
2292 dae3270c bellard
        if (flags & PAGE_WRITE) {
2293 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2294 dae3270c bellard
                return -1;
2295 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2296 dae3270c bellard
               contains translated code */
2297 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2298 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2299 dae3270c bellard
                    return -1;
2300 dae3270c bellard
            }
2301 dae3270c bellard
            return 0;
2302 dae3270c bellard
        }
2303 3d97b40b ths
    }
2304 3d97b40b ths
    return 0;
2305 3d97b40b ths
}
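
/* Usage sketch (illustrative; the error value is what a linux-user syscall
 * would typically return): validate a guest buffer before touching it:
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ) != 0)
 *         return -TARGET_EFAULT;
 *
 * A PAGE_WRITE check additionally calls page_unprotect() on pages that were
 * made read-only because they contain translated code. */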
2306 3d97b40b ths
2307 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2308 ccbb4d44 Stuart Brady
   page. Return TRUE if the fault was successfully handled. */
2309 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2310 9fa3e853 bellard
{
2311 9fa3e853 bellard
    unsigned int page_index, prot, pindex;
2312 9fa3e853 bellard
    PageDesc *p, *p1;
2313 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2314 9fa3e853 bellard
2315 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2316 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2317 c8a706fe pbrook
       practice it seems to be ok.  */
2318 c8a706fe pbrook
    mmap_lock();
2319 c8a706fe pbrook
2320 83fb7adf bellard
    host_start = address & qemu_host_page_mask;
2321 9fa3e853 bellard
    page_index = host_start >> TARGET_PAGE_BITS;
2322 9fa3e853 bellard
    p1 = page_find(page_index);
2323 c8a706fe pbrook
    if (!p1) {
2324 c8a706fe pbrook
        mmap_unlock();
2325 9fa3e853 bellard
        return 0;
2326 c8a706fe pbrook
    }
2327 83fb7adf bellard
    host_end = host_start + qemu_host_page_size;
2328 9fa3e853 bellard
    p = p1;
2329 9fa3e853 bellard
    prot = 0;
2330 9fa3e853 bellard
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2331 9fa3e853 bellard
        prot |= p->flags;
2332 9fa3e853 bellard
        p++;
2333 9fa3e853 bellard
    }
2334 9fa3e853 bellard
    /* if the page was really writable, then we change its
2335 9fa3e853 bellard
       protection back to writable */
2336 9fa3e853 bellard
    if (prot & PAGE_WRITE_ORG) {
2337 9fa3e853 bellard
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2338 9fa3e853 bellard
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2339 5fafdf24 ths
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2340 9fa3e853 bellard
                     (prot & PAGE_BITS) | PAGE_WRITE);
2341 9fa3e853 bellard
            p1[pindex].flags |= PAGE_WRITE;
2342 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2343 9fa3e853 bellard
               the corresponding translated code. */
2344 d720b93d bellard
            tb_invalidate_phys_page(address, pc, puc);
2345 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2346 9fa3e853 bellard
            tb_invalidate_check(address);
2347 9fa3e853 bellard
#endif
2348 c8a706fe pbrook
            mmap_unlock();
2349 9fa3e853 bellard
            return 1;
2350 9fa3e853 bellard
        }
2351 9fa3e853 bellard
    }
2352 c8a706fe pbrook
    mmap_unlock();
2353 9fa3e853 bellard
    return 0;
2354 9fa3e853 bellard
}
2355 9fa3e853 bellard
2356 6a00d601 bellard
static inline void tlb_set_dirty(CPUState *env,
2357 6a00d601 bellard
                                 unsigned long addr, target_ulong vaddr)
2358 1ccde1cb bellard
{
2359 1ccde1cb bellard
}
2360 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2361 9fa3e853 bellard
2362 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2363 8da3ff18 pbrook
2364 c04b2b78 Paul Brook
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2365 c04b2b78 Paul Brook
typedef struct subpage_t {
2366 c04b2b78 Paul Brook
    target_phys_addr_t base;
2367 c04b2b78 Paul Brook
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
2368 c04b2b78 Paul Brook
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
2369 c04b2b78 Paul Brook
    void *opaque[TARGET_PAGE_SIZE][2][4];
2370 c04b2b78 Paul Brook
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
2371 c04b2b78 Paul Brook
} subpage_t;
2372 c04b2b78 Paul Brook
2373 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2374 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset);
2375 c227f099 Anthony Liguori
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2376 c227f099 Anthony Liguori
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2377 db7b5426 blueswir1
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2378 db7b5426 blueswir1
                      need_subpage)                                     \
2379 db7b5426 blueswir1
    do {                                                                \
2380 db7b5426 blueswir1
        if (addr > start_addr)                                          \
2381 db7b5426 blueswir1
            start_addr2 = 0;                                            \
2382 db7b5426 blueswir1
        else {                                                          \
2383 db7b5426 blueswir1
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2384 db7b5426 blueswir1
            if (start_addr2 > 0)                                        \
2385 db7b5426 blueswir1
                need_subpage = 1;                                       \
2386 db7b5426 blueswir1
        }                                                               \
2387 db7b5426 blueswir1
                                                                        \
2388 49e9fba2 blueswir1
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2389 db7b5426 blueswir1
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2390 db7b5426 blueswir1
        else {                                                          \
2391 db7b5426 blueswir1
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2392 db7b5426 blueswir1
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2393 db7b5426 blueswir1
                need_subpage = 1;                                       \
2394 db7b5426 blueswir1
        }                                                               \
2395 db7b5426 blueswir1
    } while (0)
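
/* Worked example (assuming TARGET_PAGE_SIZE == 0x1000): registering
 * start_addr = 0x10080 with orig_size = 0x100 while processing the page
 * addr = 0x10000 yields
 *     start_addr2 = 0x080, end_addr2 = 0x17f, need_subpage = 1,
 * i.e. only bytes [0x080, 0x17f] of that page are covered, so a subpage
 * container is required.  A full, page-aligned registration leaves
 * need_subpage at 0. */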
2396 db7b5426 blueswir1
2397 8f2498f9 Michael S. Tsirkin
/* register physical memory.
2398 8f2498f9 Michael S. Tsirkin
   For RAM, 'size' must be a multiple of the target page size.
2399 8f2498f9 Michael S. Tsirkin
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2400 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2401 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2402 ccbb4d44 Stuart Brady
   start_addr and region_offset are rounded down to a page boundary
2403 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2404 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
2405 c227f099 Anthony Liguori
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2406 c227f099 Anthony Liguori
                                         ram_addr_t size,
2407 c227f099 Anthony Liguori
                                         ram_addr_t phys_offset,
2408 c227f099 Anthony Liguori
                                         ram_addr_t region_offset)
2409 33417e70 bellard
{
2410 c227f099 Anthony Liguori
    target_phys_addr_t addr, end_addr;
2411 92e873b9 bellard
    PhysPageDesc *p;
2412 9d42037b bellard
    CPUState *env;
2413 c227f099 Anthony Liguori
    ram_addr_t orig_size = size;
2414 db7b5426 blueswir1
    void *subpage;
2415 33417e70 bellard
2416 f6f3fbca Michael S. Tsirkin
    cpu_notify_set_memory(start_addr, size, phys_offset);
2417 f6f3fbca Michael S. Tsirkin
2418 67c4d23c pbrook
    if (phys_offset == IO_MEM_UNASSIGNED) {
2419 67c4d23c pbrook
        region_offset = start_addr;
2420 67c4d23c pbrook
    }
2421 8da3ff18 pbrook
    region_offset &= TARGET_PAGE_MASK;
2422 5fd386f6 bellard
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2423 c227f099 Anthony Liguori
    end_addr = start_addr + (target_phys_addr_t)size;
2424 49e9fba2 blueswir1
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2425 db7b5426 blueswir1
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2426 db7b5426 blueswir1
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2427 c227f099 Anthony Liguori
            ram_addr_t orig_memory = p->phys_offset;
2428 c227f099 Anthony Liguori
            target_phys_addr_t start_addr2, end_addr2;
2429 db7b5426 blueswir1
            int need_subpage = 0;
2430 db7b5426 blueswir1
2431 db7b5426 blueswir1
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2432 db7b5426 blueswir1
                          need_subpage);
2433 4254fab8 blueswir1
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2434 db7b5426 blueswir1
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2435 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2436 8da3ff18 pbrook
                                           &p->phys_offset, orig_memory,
2437 8da3ff18 pbrook
                                           p->region_offset);
2438 db7b5426 blueswir1
                } else {
2439 db7b5426 blueswir1
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2440 db7b5426 blueswir1
                                            >> IO_MEM_SHIFT];
2441 db7b5426 blueswir1
                }
2442 8da3ff18 pbrook
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2443 8da3ff18 pbrook
                                 region_offset);
2444 8da3ff18 pbrook
                p->region_offset = 0;
2445 db7b5426 blueswir1
            } else {
2446 db7b5426 blueswir1
                p->phys_offset = phys_offset;
2447 db7b5426 blueswir1
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2448 db7b5426 blueswir1
                    (phys_offset & IO_MEM_ROMD))
2449 db7b5426 blueswir1
                    phys_offset += TARGET_PAGE_SIZE;
2450 db7b5426 blueswir1
            }
2451 db7b5426 blueswir1
        } else {
2452 db7b5426 blueswir1
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2453 db7b5426 blueswir1
            p->phys_offset = phys_offset;
2454 8da3ff18 pbrook
            p->region_offset = region_offset;
2455 db7b5426 blueswir1
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2456 8da3ff18 pbrook
                (phys_offset & IO_MEM_ROMD)) {
2457 db7b5426 blueswir1
                phys_offset += TARGET_PAGE_SIZE;
2458 0e8f0967 pbrook
            } else {
2459 c227f099 Anthony Liguori
                target_phys_addr_t start_addr2, end_addr2;
2460 db7b5426 blueswir1
                int need_subpage = 0;
2461 db7b5426 blueswir1
2462 db7b5426 blueswir1
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2463 db7b5426 blueswir1
                              end_addr2, need_subpage);
2464 db7b5426 blueswir1
2465 4254fab8 blueswir1
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2466 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2467 8da3ff18 pbrook
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2468 67c4d23c pbrook
                                           addr & TARGET_PAGE_MASK);
2469 db7b5426 blueswir1
                    subpage_register(subpage, start_addr2, end_addr2,
2470 8da3ff18 pbrook
                                     phys_offset, region_offset);
2471 8da3ff18 pbrook
                    p->region_offset = 0;
2472 db7b5426 blueswir1
                }
2473 db7b5426 blueswir1
            }
2474 db7b5426 blueswir1
        }
2475 8da3ff18 pbrook
        region_offset += TARGET_PAGE_SIZE;
2476 33417e70 bellard
    }
2477 3b46e624 ths
2478 9d42037b bellard
    /* since each CPU stores ram addresses in its TLB cache, we must
2479 9d42037b bellard
       reset the modified entries */
2480 9d42037b bellard
    /* XXX: slow ! */
2481 9d42037b bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2482 9d42037b bellard
        tlb_flush(env, 1);
2483 9d42037b bellard
    }
2484 33417e70 bellard
}
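
/* Usage sketch (illustrative; the addresses and sizes are made up): RAM is
 * registered with the offset returned by qemu_ram_alloc(), MMIO with the
 * value returned by cpu_register_io_memory() (see the sketch near that
 * function below):
 *
 *     ram_addr_t ram_offset = qemu_ram_alloc(0x800000);
 *     cpu_register_physical_memory(0x00000000, 0x800000,
 *                                  ram_offset | IO_MEM_RAM);
 *
 * cpu_register_physical_memory() is simply this function called with
 * region_offset == 0. */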
2485 33417e70 bellard
2486 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2487 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2488 ba863458 bellard
{
2489 ba863458 bellard
    PhysPageDesc *p;
2490 ba863458 bellard
2491 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2492 ba863458 bellard
    if (!p)
2493 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2494 ba863458 bellard
    return p->phys_offset;
2495 ba863458 bellard
}
2496 ba863458 bellard
2497 c227f099 Anthony Liguori
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2498 f65ed4c1 aliguori
{
2499 f65ed4c1 aliguori
    if (kvm_enabled())
2500 f65ed4c1 aliguori
        kvm_coalesce_mmio_region(addr, size);
2501 f65ed4c1 aliguori
}
2502 f65ed4c1 aliguori
2503 c227f099 Anthony Liguori
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2504 f65ed4c1 aliguori
{
2505 f65ed4c1 aliguori
    if (kvm_enabled())
2506 f65ed4c1 aliguori
        kvm_uncoalesce_mmio_region(addr, size);
2507 f65ed4c1 aliguori
}
2508 f65ed4c1 aliguori
2509 62a2744c Sheng Yang
void qemu_flush_coalesced_mmio_buffer(void)
2510 62a2744c Sheng Yang
{
2511 62a2744c Sheng Yang
    if (kvm_enabled())
2512 62a2744c Sheng Yang
        kvm_flush_coalesced_mmio_buffer();
2513 62a2744c Sheng Yang
}
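
/* Usage sketch (illustrative; s->mmio_base and the size are hypothetical):
 * a device with write-heavy MMIO registers marks the region as coalescing
 * after mapping it, and drains pending writes before reading back state:
 *
 *     qemu_register_coalesced_mmio(s->mmio_base, 0x1000);
 *     ...
 *     qemu_flush_coalesced_mmio_buffer();
 *
 * With KVM this batches exits for the region; without KVM the calls are
 * no-ops. */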
2514 62a2744c Sheng Yang
2515 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2516 c902760f Marcelo Tosatti
2517 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2518 c902760f Marcelo Tosatti
2519 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
2520 c902760f Marcelo Tosatti
2521 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
2522 c902760f Marcelo Tosatti
{
2523 c902760f Marcelo Tosatti
    struct statfs fs;
2524 c902760f Marcelo Tosatti
    int ret;
2525 c902760f Marcelo Tosatti
2526 c902760f Marcelo Tosatti
    do {
2527 c902760f Marcelo Tosatti
            ret = statfs(path, &fs);
2528 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);
2529 c902760f Marcelo Tosatti
2530 c902760f Marcelo Tosatti
    if (ret != 0) {
2531 c902760f Marcelo Tosatti
            perror("statfs");
2532 c902760f Marcelo Tosatti
            return 0;
2533 c902760f Marcelo Tosatti
    }
2534 c902760f Marcelo Tosatti
2535 c902760f Marcelo Tosatti
    if (fs.f_type != HUGETLBFS_MAGIC)
2536 c902760f Marcelo Tosatti
            fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2537 c902760f Marcelo Tosatti
2538 c902760f Marcelo Tosatti
    return fs.f_bsize;
2539 c902760f Marcelo Tosatti
}
2540 c902760f Marcelo Tosatti
2541 c902760f Marcelo Tosatti
static void *file_ram_alloc(ram_addr_t memory, const char *path)
2542 c902760f Marcelo Tosatti
{
2543 c902760f Marcelo Tosatti
    char *filename;
2544 c902760f Marcelo Tosatti
    void *area;
2545 c902760f Marcelo Tosatti
    int fd;
2546 c902760f Marcelo Tosatti
#ifdef MAP_POPULATE
2547 c902760f Marcelo Tosatti
    int flags;
2548 c902760f Marcelo Tosatti
#endif
2549 c902760f Marcelo Tosatti
    unsigned long hpagesize;
2550 c902760f Marcelo Tosatti
2551 c902760f Marcelo Tosatti
    hpagesize = gethugepagesize(path);
2552 c902760f Marcelo Tosatti
    if (!hpagesize) {
2553 c902760f Marcelo Tosatti
        return NULL;
2554 c902760f Marcelo Tosatti
    }
2555 c902760f Marcelo Tosatti
2556 c902760f Marcelo Tosatti
    if (memory < hpagesize) {
2557 c902760f Marcelo Tosatti
        return NULL;
2558 c902760f Marcelo Tosatti
    }
2559 c902760f Marcelo Tosatti
2560 c902760f Marcelo Tosatti
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
2561 c902760f Marcelo Tosatti
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2562 c902760f Marcelo Tosatti
        return NULL;
2563 c902760f Marcelo Tosatti
    }
2564 c902760f Marcelo Tosatti
2565 c902760f Marcelo Tosatti
    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2566 c902760f Marcelo Tosatti
        return NULL;
2567 c902760f Marcelo Tosatti
    }
2568 c902760f Marcelo Tosatti
2569 c902760f Marcelo Tosatti
    fd = mkstemp(filename);
2570 c902760f Marcelo Tosatti
    if (fd < 0) {
2571 c902760f Marcelo Tosatti
        perror("mkstemp");
2572 c902760f Marcelo Tosatti
        free(filename);
2573 c902760f Marcelo Tosatti
        return NULL;
2574 c902760f Marcelo Tosatti
    }
2575 c902760f Marcelo Tosatti
    unlink(filename);
2576 c902760f Marcelo Tosatti
    free(filename);
2577 c902760f Marcelo Tosatti
2578 c902760f Marcelo Tosatti
    memory = (memory+hpagesize-1) & ~(hpagesize-1);
2579 c902760f Marcelo Tosatti
2580 c902760f Marcelo Tosatti
    /*
2581 c902760f Marcelo Tosatti
     * ftruncate is not supported by hugetlbfs in older
2582 c902760f Marcelo Tosatti
     * hosts, so don't bother bailing out on errors.
2583 c902760f Marcelo Tosatti
     * If anything goes wrong with it under other filesystems,
2584 c902760f Marcelo Tosatti
     * mmap will fail.
2585 c902760f Marcelo Tosatti
     */
2586 c902760f Marcelo Tosatti
    if (ftruncate(fd, memory))
2587 c902760f Marcelo Tosatti
        perror("ftruncate");
2588 c902760f Marcelo Tosatti
2589 c902760f Marcelo Tosatti
#ifdef MAP_POPULATE
2590 c902760f Marcelo Tosatti
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2591 c902760f Marcelo Tosatti
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2592 c902760f Marcelo Tosatti
     * to sidestep this quirk.
2593 c902760f Marcelo Tosatti
     */
2594 c902760f Marcelo Tosatti
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2595 c902760f Marcelo Tosatti
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2596 c902760f Marcelo Tosatti
#else
2597 c902760f Marcelo Tosatti
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2598 c902760f Marcelo Tosatti
#endif
2599 c902760f Marcelo Tosatti
    if (area == MAP_FAILED) {
2600 c902760f Marcelo Tosatti
        perror("file_ram_alloc: can't mmap RAM pages");
2601 c902760f Marcelo Tosatti
        close(fd);
2602 c902760f Marcelo Tosatti
        return (NULL);
2603 c902760f Marcelo Tosatti
    }
2604 c902760f Marcelo Tosatti
    return area;
2605 c902760f Marcelo Tosatti
}
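
/* Usage note (sketch; the option names are from the command line, not from
 * this file): this allocator is reached through qemu_ram_alloc() below when
 * QEMU is started with "-mem-path /dev/hugepages"; adding "-mem-prealloc"
 * sets mem_prealloc so the MAP_POPULATE | MAP_SHARED mapping above is used
 * and the huge pages are faulted in up front. */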
2606 c902760f Marcelo Tosatti
#endif
2607 c902760f Marcelo Tosatti
2608 c227f099 Anthony Liguori
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2609 94a6b54f pbrook
{
2610 94a6b54f pbrook
    RAMBlock *new_block;
2611 94a6b54f pbrook
2612 94a6b54f pbrook
    size = TARGET_PAGE_ALIGN(size);
2613 94a6b54f pbrook
    new_block = qemu_malloc(sizeof(*new_block));
2614 94a6b54f pbrook
2615 c902760f Marcelo Tosatti
    if (mem_path) {
2616 c902760f Marcelo Tosatti
#if defined (__linux__) && !defined(TARGET_S390X)
2617 c902760f Marcelo Tosatti
        new_block->host = file_ram_alloc(size, mem_path);
2618 c902760f Marcelo Tosatti
        if (!new_block->host)
2619 c902760f Marcelo Tosatti
            exit(1);
2620 c902760f Marcelo Tosatti
#else
2621 c902760f Marcelo Tosatti
        fprintf(stderr, "-mem-path option unsupported\n");
2622 c902760f Marcelo Tosatti
        exit(1);
2623 c902760f Marcelo Tosatti
#endif
2624 c902760f Marcelo Tosatti
    } else {
2625 6b02494d Alexander Graf
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2626 c902760f Marcelo Tosatti
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2627 c902760f Marcelo Tosatti
        new_block->host = mmap((void*)0x1000000, size,
2628 c902760f Marcelo Tosatti
                                PROT_EXEC|PROT_READ|PROT_WRITE,
2629 c902760f Marcelo Tosatti
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2630 6b02494d Alexander Graf
#else
2631 c902760f Marcelo Tosatti
        new_block->host = qemu_vmalloc(size);
2632 6b02494d Alexander Graf
#endif
2633 ccb167e9 Izik Eidus
#ifdef MADV_MERGEABLE
2634 c902760f Marcelo Tosatti
        madvise(new_block->host, size, MADV_MERGEABLE);
2635 ccb167e9 Izik Eidus
#endif
2636 c902760f Marcelo Tosatti
    }
2637 94a6b54f pbrook
    new_block->offset = last_ram_offset;
2638 94a6b54f pbrook
    new_block->length = size;
2639 94a6b54f pbrook
2640 94a6b54f pbrook
    new_block->next = ram_blocks;
2641 94a6b54f pbrook
    ram_blocks = new_block;
2642 94a6b54f pbrook
2643 94a6b54f pbrook
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2644 94a6b54f pbrook
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2645 94a6b54f pbrook
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2646 94a6b54f pbrook
           0xff, size >> TARGET_PAGE_BITS);
2647 94a6b54f pbrook
2648 94a6b54f pbrook
    last_ram_offset += size;
2649 94a6b54f pbrook
2650 6f0437e8 Jan Kiszka
    if (kvm_enabled())
2651 6f0437e8 Jan Kiszka
        kvm_setup_guest_memory(new_block->host, size);
2652 6f0437e8 Jan Kiszka
2653 94a6b54f pbrook
    return new_block->offset;
2654 94a6b54f pbrook
}
2655 e9a1ab19 bellard
2656 c227f099 Anthony Liguori
void qemu_ram_free(ram_addr_t addr)
2657 e9a1ab19 bellard
{
2658 94a6b54f pbrook
    /* TODO: implement this.  */
2659 e9a1ab19 bellard
}
2660 e9a1ab19 bellard
2661 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2662 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2663 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
2664 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
2665 5579c7f3 pbrook

2666 5579c7f3 pbrook
   It should not be used for general purpose DMA.
2667 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2668 5579c7f3 pbrook
 */
2669 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
2670 dc828ca1 pbrook
{
2671 94a6b54f pbrook
    RAMBlock *prev;
2672 94a6b54f pbrook
    RAMBlock **prevp;
2673 94a6b54f pbrook
    RAMBlock *block;
2674 94a6b54f pbrook
2675 94a6b54f pbrook
    prev = NULL;
2676 94a6b54f pbrook
    prevp = &ram_blocks;
2677 94a6b54f pbrook
    block = ram_blocks;
2678 94a6b54f pbrook
    while (block && (block->offset > addr
2679 94a6b54f pbrook
                     || block->offset + block->length <= addr)) {
2680 94a6b54f pbrook
        if (prev)
2681 94a6b54f pbrook
          prevp = &prev->next;
2682 94a6b54f pbrook
        prev = block;
2683 94a6b54f pbrook
        block = block->next;
2684 94a6b54f pbrook
    }
2685 94a6b54f pbrook
    if (!block) {
2686 94a6b54f pbrook
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2687 94a6b54f pbrook
        abort();
2688 94a6b54f pbrook
    }
2689 94a6b54f pbrook
    /* Move this entry to the start of the list.  */
2690 94a6b54f pbrook
    if (prev) {
2691 94a6b54f pbrook
        prev->next = block->next;
2692 94a6b54f pbrook
        block->next = *prevp;
2693 94a6b54f pbrook
        *prevp = block;
2694 94a6b54f pbrook
    }
2695 94a6b54f pbrook
    return block->host + (addr - block->offset);
2696 dc828ca1 pbrook
}
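
/* Usage sketch (illustrative; vga_ram_size and the s->... fields are
 * hypothetical): a device that owns a RAM block keeps the ram_addr_t it got
 * from qemu_ram_alloc() and resolves it to a host pointer for its own use:
 *
 *     s->vram_offset = qemu_ram_alloc(vga_ram_size);
 *     s->vram_ptr    = qemu_get_ram_ptr(s->vram_offset);
 *
 * As the comment above says, this is only for memory the device owns;
 * guest-visible DMA should use cpu_physical_memory_rw() or
 * cpu_physical_memory_map() instead. */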
2697 dc828ca1 pbrook
2698 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
2699 5579c7f3 pbrook
   (typically a TLB entry) back to a ram offset.  */
2700 c227f099 Anthony Liguori
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2701 5579c7f3 pbrook
{
2702 94a6b54f pbrook
    RAMBlock *prev;
2703 94a6b54f pbrook
    RAMBlock *block;
2704 94a6b54f pbrook
    uint8_t *host = ptr;
2705 94a6b54f pbrook
2706 94a6b54f pbrook
    prev = NULL;
2707 94a6b54f pbrook
    block = ram_blocks;
2708 94a6b54f pbrook
    while (block && (block->host > host
2709 94a6b54f pbrook
                     || block->host + block->length <= host)) {
2710 94a6b54f pbrook
        prev = block;
2711 94a6b54f pbrook
        block = block->next;
2712 94a6b54f pbrook
    }
2713 94a6b54f pbrook
    if (!block) {
2714 94a6b54f pbrook
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2715 94a6b54f pbrook
        abort();
2716 94a6b54f pbrook
    }
2717 94a6b54f pbrook
    return block->offset + (host - block->host);
2718 5579c7f3 pbrook
}
2719 5579c7f3 pbrook
2720 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2721 33417e70 bellard
{
2722 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2723 ab3d1727 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2724 67d3b957 pbrook
#endif
2725 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2726 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 1);
2727 e18231a3 blueswir1
#endif
2728 e18231a3 blueswir1
    return 0;
2729 e18231a3 blueswir1
}
2730 e18231a3 blueswir1
2731 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2732 e18231a3 blueswir1
{
2733 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2734 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2735 e18231a3 blueswir1
#endif
2736 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2737 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 2);
2738 e18231a3 blueswir1
#endif
2739 e18231a3 blueswir1
    return 0;
2740 e18231a3 blueswir1
}
2741 e18231a3 blueswir1
2742 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2743 e18231a3 blueswir1
{
2744 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2745 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2746 e18231a3 blueswir1
#endif
2747 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2748 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 4);
2749 b4f0a316 blueswir1
#endif
2750 33417e70 bellard
    return 0;
2751 33417e70 bellard
}
2752 33417e70 bellard
2753 c227f099 Anthony Liguori
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2754 33417e70 bellard
{
2755 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2756 ab3d1727 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2757 67d3b957 pbrook
#endif
2758 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2759 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 1);
2760 e18231a3 blueswir1
#endif
2761 e18231a3 blueswir1
}
2762 e18231a3 blueswir1
2763 c227f099 Anthony Liguori
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2764 e18231a3 blueswir1
{
2765 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2766 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2767 e18231a3 blueswir1
#endif
2768 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2769 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 2);
2770 e18231a3 blueswir1
#endif
2771 e18231a3 blueswir1
}
2772 e18231a3 blueswir1
2773 c227f099 Anthony Liguori
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2774 e18231a3 blueswir1
{
2775 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2776 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2777 e18231a3 blueswir1
#endif
2778 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2779 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 4);
2780 b4f0a316 blueswir1
#endif
2781 33417e70 bellard
}
2782 33417e70 bellard
2783 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2784 33417e70 bellard
    unassigned_mem_readb,
2785 e18231a3 blueswir1
    unassigned_mem_readw,
2786 e18231a3 blueswir1
    unassigned_mem_readl,
2787 33417e70 bellard
};
2788 33417e70 bellard
2789 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2790 33417e70 bellard
    unassigned_mem_writeb,
2791 e18231a3 blueswir1
    unassigned_mem_writew,
2792 e18231a3 blueswir1
    unassigned_mem_writel,
2793 33417e70 bellard
};
2794 33417e70 bellard
2795 c227f099 Anthony Liguori
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2796 0f459d16 pbrook
                                uint32_t val)
2797 9fa3e853 bellard
{
2798 3a7d929e bellard
    int dirty_flags;
2799 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2800 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2801 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2802 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 1);
2803 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2804 9fa3e853 bellard
#endif
2805 3a7d929e bellard
    }
2806 5579c7f3 pbrook
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2807 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2808 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2809 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2810 f23db169 bellard
       flushed */
2811 f23db169 bellard
    if (dirty_flags == 0xff)
2812 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2813 9fa3e853 bellard
}
2814 9fa3e853 bellard
2815 c227f099 Anthony Liguori
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2816 0f459d16 pbrook
                                uint32_t val)
2817 9fa3e853 bellard
{
2818 3a7d929e bellard
    int dirty_flags;
2819 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2820 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2821 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2822 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 2);
2823 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2824 9fa3e853 bellard
#endif
2825 3a7d929e bellard
    }
2826 5579c7f3 pbrook
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2827 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2828 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2829 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2830 f23db169 bellard
       flushed */
2831 f23db169 bellard
    if (dirty_flags == 0xff)
2832 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2833 9fa3e853 bellard
}
2834 9fa3e853 bellard
2835 c227f099 Anthony Liguori
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2836 0f459d16 pbrook
                                uint32_t val)
2837 9fa3e853 bellard
{
2838 3a7d929e bellard
    int dirty_flags;
2839 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2840 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2841 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2842 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
2843 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2844 9fa3e853 bellard
#endif
2845 3a7d929e bellard
    }
2846 5579c7f3 pbrook
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2847 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2848 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2849 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2850 f23db169 bellard
       flushed */
2851 f23db169 bellard
    if (dirty_flags == 0xff)
2852 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2853 9fa3e853 bellard
}
2854 9fa3e853 bellard
2855 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const error_mem_read[3] = {
2856 9fa3e853 bellard
    NULL, /* never used */
2857 9fa3e853 bellard
    NULL, /* never used */
2858 9fa3e853 bellard
    NULL, /* never used */
2859 9fa3e853 bellard
};
2860 9fa3e853 bellard
2861 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2862 1ccde1cb bellard
    notdirty_mem_writeb,
2863 1ccde1cb bellard
    notdirty_mem_writew,
2864 1ccde1cb bellard
    notdirty_mem_writel,
2865 1ccde1cb bellard
};
2866 1ccde1cb bellard
2867 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
2868 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
2869 0f459d16 pbrook
{
2870 0f459d16 pbrook
    CPUState *env = cpu_single_env;
2871 06d55cc1 aliguori
    target_ulong pc, cs_base;
2872 06d55cc1 aliguori
    TranslationBlock *tb;
2873 0f459d16 pbrook
    target_ulong vaddr;
2874 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2875 06d55cc1 aliguori
    int cpu_flags;
2876 0f459d16 pbrook
2877 06d55cc1 aliguori
    if (env->watchpoint_hit) {
2878 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
2879 06d55cc1 aliguori
         * the debug interrupt so that is will trigger after the
2880 06d55cc1 aliguori
         * current instruction. */
2881 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2882 06d55cc1 aliguori
        return;
2883 06d55cc1 aliguori
    }
2884 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2885 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2886 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
2887 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2888 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
2889 6e140f28 aliguori
            if (!env->watchpoint_hit) {
2890 6e140f28 aliguori
                env->watchpoint_hit = wp;
2891 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
2892 6e140f28 aliguori
                if (!tb) {
2893 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2894 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
2895 6e140f28 aliguori
                }
2896 6e140f28 aliguori
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2897 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
2898 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2899 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
2900 6e140f28 aliguori
                } else {
2901 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2902 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2903 6e140f28 aliguori
                }
2904 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
2905 06d55cc1 aliguori
            }
2906 6e140f28 aliguori
        } else {
2907 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
2908 0f459d16 pbrook
        }
2909 0f459d16 pbrook
    }
2910 0f459d16 pbrook
}
2911 0f459d16 pbrook
2912 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2913 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
2914 6658ffb8 pbrook
   phys routines.  */
2915 c227f099 Anthony Liguori
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2916 6658ffb8 pbrook
{
2917 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2918 6658ffb8 pbrook
    return ldub_phys(addr);
2919 6658ffb8 pbrook
}
2920 6658ffb8 pbrook
2921 c227f099 Anthony Liguori
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2922 6658ffb8 pbrook
{
2923 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2924 6658ffb8 pbrook
    return lduw_phys(addr);
2925 6658ffb8 pbrook
}
2926 6658ffb8 pbrook
2927 c227f099 Anthony Liguori
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2928 6658ffb8 pbrook
{
2929 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2930 6658ffb8 pbrook
    return ldl_phys(addr);
2931 6658ffb8 pbrook
}
2932 6658ffb8 pbrook
2933 c227f099 Anthony Liguori
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2934 6658ffb8 pbrook
                             uint32_t val)
2935 6658ffb8 pbrook
{
2936 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2937 6658ffb8 pbrook
    stb_phys(addr, val);
2938 6658ffb8 pbrook
}
2939 6658ffb8 pbrook
2940 c227f099 Anthony Liguori
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2941 6658ffb8 pbrook
                             uint32_t val)
2942 6658ffb8 pbrook
{
2943 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2944 6658ffb8 pbrook
    stw_phys(addr, val);
2945 6658ffb8 pbrook
}
2946 6658ffb8 pbrook
2947 c227f099 Anthony Liguori
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2948 6658ffb8 pbrook
                             uint32_t val)
2949 6658ffb8 pbrook
{
2950 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2951 6658ffb8 pbrook
    stl_phys(addr, val);
2952 6658ffb8 pbrook
}
2953 6658ffb8 pbrook
2954 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const watch_mem_read[3] = {
2955 6658ffb8 pbrook
    watch_mem_readb,
2956 6658ffb8 pbrook
    watch_mem_readw,
2957 6658ffb8 pbrook
    watch_mem_readl,
2958 6658ffb8 pbrook
};
2959 6658ffb8 pbrook
2960 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2961 6658ffb8 pbrook
    watch_mem_writeb,
2962 6658ffb8 pbrook
    watch_mem_writew,
2963 6658ffb8 pbrook
    watch_mem_writel,
2964 6658ffb8 pbrook
};
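
/* Flow sketch (the gdbstub-style caller and flag combination shown here are
 * only an example, not taken from this file): a watchpoint is installed
 * with something like
 *
 *     cpu_watchpoint_insert(env, addr, len, BP_MEM_WRITE | BP_GDB, NULL);
 *
 * tlb_set_page() then points the TLB entry for that page at io_mem_watch,
 * so a guest store is routed to watch_mem_write*() above, which calls
 * check_watchpoint() and only then performs the real st*_phys() access. */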
2965 6658ffb8 pbrook
2966 c227f099 Anthony Liguori
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2967 db7b5426 blueswir1
                                 unsigned int len)
2968 db7b5426 blueswir1
{
2969 db7b5426 blueswir1
    uint32_t ret;
2970 db7b5426 blueswir1
    unsigned int idx;
2971 db7b5426 blueswir1
2972 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2973 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2974 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2975 db7b5426 blueswir1
           mmio, len, addr, idx);
2976 db7b5426 blueswir1
#endif
2977 8da3ff18 pbrook
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2978 8da3ff18 pbrook
                                       addr + mmio->region_offset[idx][0][len]);
2979 db7b5426 blueswir1
2980 db7b5426 blueswir1
    return ret;
2981 db7b5426 blueswir1
}
2982 db7b5426 blueswir1
2983 c227f099 Anthony Liguori
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2984 db7b5426 blueswir1
                              uint32_t value, unsigned int len)
2985 db7b5426 blueswir1
{
2986 db7b5426 blueswir1
    unsigned int idx;
2987 db7b5426 blueswir1
2988 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2989 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2990 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2991 db7b5426 blueswir1
           mmio, len, addr, idx, value);
2992 db7b5426 blueswir1
#endif
2993 8da3ff18 pbrook
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2994 8da3ff18 pbrook
                                  addr + mmio->region_offset[idx][1][len],
2995 8da3ff18 pbrook
                                  value);
2996 db7b5426 blueswir1
}
2997 db7b5426 blueswir1
2998 c227f099 Anthony Liguori
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2999 db7b5426 blueswir1
{
3000 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3001 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3002 db7b5426 blueswir1
#endif
3003 db7b5426 blueswir1
3004 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 0);
3005 db7b5426 blueswir1
}
3006 db7b5426 blueswir1
3007 c227f099 Anthony Liguori
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3008 db7b5426 blueswir1
                            uint32_t value)
3009 db7b5426 blueswir1
{
3010 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3011 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3012 db7b5426 blueswir1
#endif
3013 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
3014 db7b5426 blueswir1
}
3015 db7b5426 blueswir1
3016 c227f099 Anthony Liguori
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3017 db7b5426 blueswir1
{
3018 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3019 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3020 db7b5426 blueswir1
#endif
3021 db7b5426 blueswir1
3022 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 1);
3023 db7b5426 blueswir1
}
3024 db7b5426 blueswir1
3025 c227f099 Anthony Liguori
static void subpage_writew (void *opaque, target_phys_addr_t addr,
3026 db7b5426 blueswir1
                            uint32_t value)
3027 db7b5426 blueswir1
{
3028 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3029 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3030 db7b5426 blueswir1
#endif
3031 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
3032 db7b5426 blueswir1
}
3033 db7b5426 blueswir1
3034 c227f099 Anthony Liguori
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3035 db7b5426 blueswir1
{
3036 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3037 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3038 db7b5426 blueswir1
#endif
3039 db7b5426 blueswir1
3040 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 2);
3041 db7b5426 blueswir1
}
3042 db7b5426 blueswir1
3043 db7b5426 blueswir1
static void subpage_writel (void *opaque,
3044 c227f099 Anthony Liguori
                         target_phys_addr_t addr, uint32_t value)
3045 db7b5426 blueswir1
{
3046 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3047 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3048 db7b5426 blueswir1
#endif
3049 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
3050 db7b5426 blueswir1
}
3051 db7b5426 blueswir1
3052 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const subpage_read[] = {
3053 db7b5426 blueswir1
    &subpage_readb,
3054 db7b5426 blueswir1
    &subpage_readw,
3055 db7b5426 blueswir1
    &subpage_readl,
3056 db7b5426 blueswir1
};
3057 db7b5426 blueswir1
3058 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const subpage_write[] = {
3059 db7b5426 blueswir1
    &subpage_writeb,
3060 db7b5426 blueswir1
    &subpage_writew,
3061 db7b5426 blueswir1
    &subpage_writel,
3062 db7b5426 blueswir1
};
3063 db7b5426 blueswir1
3064 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3065 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset)
3066 db7b5426 blueswir1
{
3067 db7b5426 blueswir1
    int idx, eidx;
3068 4254fab8 blueswir1
    unsigned int i;
3069 db7b5426 blueswir1
3070 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3071 db7b5426 blueswir1
        return -1;
3072 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
3073 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
3074 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3075 0bf9e31a Blue Swirl
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3076 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
3077 db7b5426 blueswir1
#endif
3078 db7b5426 blueswir1
    memory >>= IO_MEM_SHIFT;
3079 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
3080 4254fab8 blueswir1
        for (i = 0; i < 4; i++) {
3081 3ee89922 blueswir1
            if (io_mem_read[memory][i]) {
3082 3ee89922 blueswir1
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3083 3ee89922 blueswir1
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3084 8da3ff18 pbrook
                mmio->region_offset[idx][0][i] = region_offset;
3085 3ee89922 blueswir1
            }
3086 3ee89922 blueswir1
            if (io_mem_write[memory][i]) {
3087 3ee89922 blueswir1
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3088 3ee89922 blueswir1
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3089 8da3ff18 pbrook
                mmio->region_offset[idx][1][i] = region_offset;
3090 3ee89922 blueswir1
            }
3091 4254fab8 blueswir1
        }
3092 db7b5426 blueswir1
    }
3093 db7b5426 blueswir1
3094 db7b5426 blueswir1
    return 0;
3095 db7b5426 blueswir1
}
3096 db7b5426 blueswir1
3097 c227f099 Anthony Liguori
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3098 c227f099 Anthony Liguori
                           ram_addr_t orig_memory, ram_addr_t region_offset)
3099 db7b5426 blueswir1
{
3100 c227f099 Anthony Liguori
    subpage_t *mmio;
3101 db7b5426 blueswir1
    int subpage_memory;
3102 db7b5426 blueswir1
3103 c227f099 Anthony Liguori
    mmio = qemu_mallocz(sizeof(subpage_t));
3104 1eec614b aliguori
3105 1eec614b aliguori
    mmio->base = base;
3106 1eed09cb Avi Kivity
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3107 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3108 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3109 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3110 db7b5426 blueswir1
#endif
3111 1eec614b aliguori
    *phys = subpage_memory | IO_MEM_SUBPAGE;
3112 1eec614b aliguori
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3113 8da3ff18 pbrook
                         region_offset);
3114 db7b5426 blueswir1
3115 db7b5426 blueswir1
    return mmio;
3116 db7b5426 blueswir1
}
3117 db7b5426 blueswir1
3118 88715657 aliguori
static int get_free_io_mem_idx(void)
3119 88715657 aliguori
{
3120 88715657 aliguori
    int i;
3121 88715657 aliguori
3122 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3123 88715657 aliguori
        if (!io_mem_used[i]) {
3124 88715657 aliguori
            io_mem_used[i] = 1;
3125 88715657 aliguori
            return i;
3126 88715657 aliguori
        }
3127 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3128 88715657 aliguori
    return -1;
3129 88715657 aliguori
}
3130 88715657 aliguori
3131 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3132 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3133 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3134 3ee89922 blueswir1
   If io_index is non zero, the corresponding io zone is
3135 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3136 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3137 4254fab8 blueswir1
   returned on error. */
3138 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
3139 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
3140 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
3141 1eed09cb Avi Kivity
                                        void *opaque)
3142 33417e70 bellard
{
3143 4254fab8 blueswir1
    int i, subwidth = 0;
3144 33417e70 bellard
3145 33417e70 bellard
    if (io_index <= 0) {
3146 88715657 aliguori
        io_index = get_free_io_mem_idx();
3147 88715657 aliguori
        if (io_index == -1)
3148 88715657 aliguori
            return io_index;
3149 33417e70 bellard
    } else {
3150 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
3151 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3152 33417e70 bellard
            return -1;
3153 33417e70 bellard
    }
3154 b5ff1b31 bellard
3155 33417e70 bellard
    for(i = 0;i < 3; i++) {
3156 4254fab8 blueswir1
        if (!mem_read[i] || !mem_write[i])
3157 4254fab8 blueswir1
            subwidth = IO_MEM_SUBWIDTH;
3158 33417e70 bellard
        io_mem_read[io_index][i] = mem_read[i];
3159 33417e70 bellard
        io_mem_write[io_index][i] = mem_write[i];
3160 33417e70 bellard
    }
3161 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3162 4254fab8 blueswir1
    return (io_index << IO_MEM_SHIFT) | subwidth;
3163 33417e70 bellard
}
3164 61382a50 bellard
3165 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3166 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3167 1eed09cb Avi Kivity
                           void *opaque)
3168 1eed09cb Avi Kivity
{
3169 1eed09cb Avi Kivity
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3170 1eed09cb Avi Kivity
}
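
/* Usage sketch (illustrative; the "mydev" handlers, the state pointer s and
 * the base address are hypothetical): the three entries are the byte, word
 * and dword handlers, in that order:
 *
 *     static CPUReadMemoryFunc * const mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(mydev_read, mydev_write, s);
 *     cpu_register_physical_memory(0x10000000, 0x1000, io);
 *
 * A NULL entry marks that access width as unsupported; the returned index
 * then carries IO_MEM_SUBWIDTH, which forces the subpage path in
 * cpu_register_physical_memory_offset() above. */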
3171 1eed09cb Avi Kivity
3172 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3173 88715657 aliguori
{
3174 88715657 aliguori
    int i;
3175 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3176 88715657 aliguori
3177 88715657 aliguori
    for (i=0;i < 3; i++) {
3178 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3179 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3180 88715657 aliguori
    }
3181 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3182 88715657 aliguori
    io_mem_used[io_index] = 0;
3183 88715657 aliguori
}
3184 88715657 aliguori
3185 e9179ce1 Avi Kivity
static void io_mem_init(void)
3186 e9179ce1 Avi Kivity
{
3187 e9179ce1 Avi Kivity
    int i;
3188 e9179ce1 Avi Kivity
3189 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3190 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3191 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3192 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3193 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3194 e9179ce1 Avi Kivity
3195 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3196 e9179ce1 Avi Kivity
                                          watch_mem_write, NULL);
3197 e9179ce1 Avi Kivity
}
3198 e9179ce1 Avi Kivity
3199 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
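
/* Illustrative sketch (not part of the original exec.c): how a caller such
   as a device model or a loader might use cpu_physical_memory_rw() above to
   copy a small host buffer into guest physical memory and read it back.
   The guest address (0x1000) and the helper name are hypothetical; the
   block is disabled so it does not affect the build. */
#if 0
static void example_phys_rw_roundtrip(void)
{
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];
    target_phys_addr_t gpa = 0x1000;

    /* is_write = 1: copy the host buffer into guest memory (RAM or MMIO) */
    cpu_physical_memory_rw(gpa, out, sizeof(out), 1);
    /* is_write = 0: copy guest memory back into a host buffer */
    cpu_physical_memory_rw(gpa, in, sizeof(in), 0);
}
#endif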

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
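
/* Illustrative sketch (not part of the original exec.c): a firmware loader
   could use cpu_physical_memory_write_rom() to place a BIOS image into a
   region that was registered as ROM, where cpu_physical_memory_rw() would
   silently drop the write.  The base address below is a hypothetical
   reset-vector region; the block is disabled illustrative code. */
#if 0
static void example_load_rom_image(const uint8_t *image, int size)
{
    target_phys_addr_t rom_base = 0xfffe0000; /* hypothetical ROM base */

    cpu_physical_memory_write_rom(rom_base, image, size);
}
#endif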

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
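
/* Illustrative sketch (not part of the original exec.c): a DMA-capable
   device that fails to map guest memory (for example because the single
   bounce buffer above is already in use) can register a map client.
   cpu_notify_map_clients() runs the callback, and removes the client, once
   cpu_physical_memory_unmap() releases the bounce buffer, so the device can
   retry the transfer then.  The device structure and retry function are
   hypothetical; the block is disabled illustrative code. */
#if 0
typedef struct ExampleDMADevice {
    target_phys_addr_t sg_addr;
    target_phys_addr_t sg_len;
} ExampleDMADevice;

static void example_dma_retry(void *opaque)
{
    ExampleDMADevice *dev = opaque;
    target_phys_addr_t len = dev->sg_len;
    void *host = cpu_physical_memory_map(dev->sg_addr, &len, 1);

    if (!host) {
        /* still busy: ask to be called again on the next notification */
        cpu_register_map_client(dev, example_dma_retry);
        return;
    }
    /* ... fill the mapping, then release it ... */
    cpu_physical_memory_unmap(host, len, 1, len);
}
#endif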

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
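
/* Illustrative sketch (not part of the original exec.c): the intended
   calling pattern for the two functions above.  Map as much of the region
   as possible for zero-copy access, fall back to cpu_physical_memory_rw()
   when the mapping fails or comes back shorter than requested, and unmap
   with the length that was actually written.  The wrapper name is
   hypothetical; the block is disabled illustrative code. */
#if 0
static void example_write_guest_region(target_phys_addr_t addr,
                                       const uint8_t *buf,
                                       target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    uint8_t *host = cpu_physical_memory_map(addr, &plen, 1);

    if (host && plen == size) {
        memcpy(host, buf, size);                    /* zero-copy fast path */
        cpu_physical_memory_unmap(host, plen, 1, size);
    } else {
        if (host) {
            cpu_physical_memory_unmap(host, plen, 1, 0);
        }
        /* slow path: cpu_physical_memory_rw() splits the access itself
           (cast needed because its prototype is not const) */
        cpu_physical_memory_rw(addr, (uint8_t *)buf, size, 1);
    }
}
#endif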

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
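
/* Illustrative sketch (not part of the original exec.c): the kind of caller
   the comment above has in mind.  A target MMU walker that sets the
   accessed/dirty bits in a guest page-table entry can pair ldl_phys() with
   stl_phys_notdirty() so that the PTE update itself does not mark the RAM
   page as dirty or invalidate translated code.  The bit positions are
   hypothetical (modelled on x86-style PTEs); the block is disabled
   illustrative code. */
#if 0
static void example_set_pte_accessed_dirty(target_phys_addr_t pte_addr,
                                           int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);
    uint32_t new_pte = pte | 0x20;          /* hypothetical accessed bit */

    if (is_write) {
        new_pte |= 0x40;                    /* hypothetical dirty bit */
    }
    if (new_pte != pte) {
        stl_phys_notdirty(pte_addr, new_pte);
    }
}
#endif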

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
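
/* Illustrative sketch (not part of the original exec.c): board or firmware
   setup code often uses the fixed-size helpers above to poke individual
   fields into guest RAM, for example filling in a small parameter block at
   boot.  The layout, field meanings and magic value are hypothetical; note
   that stl_phys() expects an aligned address.  Disabled illustrative code. */
#if 0
static void example_write_boot_params(target_phys_addr_t base,
                                      uint32_t ram_size_kb,
                                      uint16_t boot_device)
{
    stl_phys(base + 0, 0x12345678);        /* hypothetical magic value */
    stl_phys(base + 4, ram_size_kb);       /* memory size field */
    stw_phys(base + 8, boot_device);       /* boot device field */
    stb_phys(base + 10, 1);                /* "valid" flag */
}
#endif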

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
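
/* Illustrative sketch (not part of the original exec.c): cpu_memory_rw_debug()
   is the entry point debuggers such as the gdb stub use to access guest
   *virtual* addresses; it walks the guest mapping via
   cpu_get_phys_page_debug() (or the page flags in user mode) instead of
   going through the TLB.  The wrapper below is hypothetical and disabled. */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   void *dest, int size)
{
    /* returns 0 on success, -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)dest, size, 0);
}
#endif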

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
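
/* Illustrative sketch (not part of the original exec.c): dump_exec_info()
   takes any fprintf-like callback, so besides the monitor it can print the
   JIT statistics straight to a stdio stream, e.g. from a debugging hook.
   Disabled illustrative code. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif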

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif