/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* enable various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
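
/* Worked example of the level math above (illustrative figures, not
   tied to any particular build): with L1_MAP_ADDR_SPACE_BITS = 47 and
   TARGET_PAGE_BITS = 12 there are 35 page-index bits to map.
   V_L1_BITS_REM = 35 % 10 = 5, which is >= 4, so V_L1_BITS = 5: the
   top level has V_L1_SIZE = 32 entries, V_L1_SHIFT = 35 - 5 = 30, and
   the remaining 30 bits are resolved by three 10-bit levels below. */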

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
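
/* The whole entry packs into 16 bits: a leaf entry's 15-bit ptr
   indexes phys_sections, an interior entry's ptr indexes
   phys_map_nodes, and PHYS_MAP_NODE_NIL below (0x7fff, all 15 ptr
   bits set) marks a subtree that has not been populated yet. */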

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
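
/* Illustrative walk (using the example split sketched above, with
   V_L1_SHIFT = 30): after the top-level slot, the page index is
   consumed 10 bits at a time -- index[34:30] selects the l1_map slot,
   index[29:20] and index[19:10] select intermediate pointer tables,
   and index[9:0] picks the PageDesc inside the bottom-level array. */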

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
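
/* How the range loop above places leaves, as an illustration: at
   level N a leaf covers step = 2^(N * L2_BITS) pages, so a
   step-aligned run of at least step pages is recorded as one entry at
   that level instead of L2_SIZE entries one level down.  E.g. with
   L2_BITS = 10, a 1024-page-aligned, 1024-page region becomes a
   single level-1 leaf; unaligned head and tail pages recurse down to
   level 0. */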

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
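
/* The lookup cannot fail: walking into an unpopulated subtree falls
   through to phys_section_unassigned, so callers always receive a
   valid MemoryRegionSection pointer.  Typical use, as a sketch only:

       MemoryRegionSection *s = phys_page_find(addr >> TARGET_PAGE_BITS);
       if (memory_region_is_unassigned(s->mr)) {
           ...
       }
 */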

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
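
/* Sizing note: code_gen_buffer_max_size keeps TCG_MAX_OP_SIZE *
   OPC_BUF_SIZE bytes of headroom, so a block that passes the
   "buffer full" check in tb_alloc() below can still be translated in
   full without running off the end of the buffer, and
   code_gen_max_blocks sizes the tbs[] array from the expected
   average block size. */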

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
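
/* List encoding note: the per-page TB lists and the jump lists store
   small tags in the two low bits of each pointer.  For page_next the
   tag is the page slot (0 or 1) the link belongs to; for the jmp
   lists below, tags 0 and 1 select the jump slot and tag 2 marks the
   list head.  Masking with ~3 recovers the TranslationBlock pointer,
   as tb_page_remove above and tb_jmp_remove below do. */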

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
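
/* Why this unchains the jump: tb_next_offset[n] is the offset, within
   this TB's generated code, of the instruction that follows jump 'n'.
   Retargeting the jump at tc_ptr + tb_next_offset[n] turns it into a
   fall-through, so execution continues to the TB's normal exit path
   instead of branching directly into another TB. */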

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
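
/* Worked example: set_bits(tab, 3, 7) marks bits 3..9.  start and end
   fall in different bytes, so the first byte gets 0xff << 3 = 0xf8
   (bits 3-7), no full 0xff bytes follow, and the last byte gets
   ~(0xff << (10 & 7)) = 0x03 (bits 8-9). */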

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
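
/* Note on the page-2 check above: a TB's code can cross at most one
   page boundary, so if its last byte lands on a different virtual
   page, that page's physical address is resolved too and both pages
   are handed to tb_link_page(), ensuring that a write to either page
   invalidates the TB. */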

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
1146 d720b93d bellard
            if (current_tb == tb &&
1147 2e70f6ef pbrook
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
1148 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1149 d720b93d bellard
                its execution. We could be more precise by checking
1150 d720b93d bellard
                that the modification is after the current PC, but it
1151 d720b93d bellard
                would require a specialized function to partially
1152 d720b93d bellard
                restore the CPU state */
1153 3b46e624 ths
1154 d720b93d bellard
                current_tb_modified = 1;
1155 618ba8e6 Stefan Weil
                cpu_restore_state(current_tb, env, env->mem_io_pc);
1156 6b917547 aliguori
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1157 6b917547 aliguori
                                     &current_flags);
1158 d720b93d bellard
            }
1159 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1160 6f5a9f7e bellard
            /* we need to do that to handle the case where a signal
1161 6f5a9f7e bellard
               occurs while doing tb_phys_invalidate() */
1162 6f5a9f7e bellard
            saved_tb = NULL;
1163 6f5a9f7e bellard
            if (env) {
1164 6f5a9f7e bellard
                saved_tb = env->current_tb;
1165 6f5a9f7e bellard
                env->current_tb = NULL;
1166 6f5a9f7e bellard
            }
1167 9fa3e853 bellard
            tb_phys_invalidate(tb, -1);
1168 6f5a9f7e bellard
            if (env) {
1169 6f5a9f7e bellard
                env->current_tb = saved_tb;
1170 6f5a9f7e bellard
                if (env->interrupt_request && env->current_tb)
1171 6f5a9f7e bellard
                    cpu_interrupt(env, env->interrupt_request);
1172 6f5a9f7e bellard
            }
1173 9fa3e853 bellard
        }
1174 9fa3e853 bellard
        tb = tb_next;
1175 9fa3e853 bellard
    }
1176 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
1177 9fa3e853 bellard
    /* if no code remaining, no need to continue to use slow writes */
1178 9fa3e853 bellard
    if (!p->first_tb) {
1179 9fa3e853 bellard
        invalidate_page_bitmap(p);
1180 d720b93d bellard
        if (is_cpu_write_access) {
1181 2e70f6ef pbrook
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1182 d720b93d bellard
        }
1183 d720b93d bellard
    }
1184 d720b93d bellard
#endif
1185 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1186 d720b93d bellard
    if (current_tb_modified) {
1187 d720b93d bellard
        /* we generate a block containing just the instruction
1188 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1189 d720b93d bellard
           itself */
1190 ea1c1802 bellard
        env->current_tb = NULL;
1191 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1192 d720b93d bellard
        cpu_resume_from_signal(env, NULL);
1193 9fa3e853 bellard
    }
1194 fd6ce8f6 bellard
#endif
1195 9fa3e853 bellard
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
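
/* Worked example (illustrative): for a 2-byte write whose page offset is
   0x43, the test reads bitmap byte 0x43 >> 3 = 8, shifts it right by
   0x43 & 7 = 3 bits, and masks with (1 << 2) - 1 = 0x3 to select the two
   bits covering the written bytes; only if one of them is set (i.e. the
   bytes hold translated code) does the slow invalidation path run. */
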
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {
            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
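
/* Note on granularity (illustrative): protection is applied per host page,
   which may be larger than the target page.  With 4 KiB target pages on a
   host using 64 KiB pages, write-protecting one code page strips
   PAGE_WRITE from all 16 target pages sharing that host page, which is why
   the loop above accumulates the combined protection bits before calling
   mprotect(). */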

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
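
/* Note: the search relies on tbs[] being filled in code_gen_buffer
   allocation order, so tc_ptr values increase monotonically with the array
   index.  On a miss the loop exits with m_max naming the last TB whose
   tc_ptr lies below the probed address, i.e. the block containing it; the
   SMC handlers above use this to map a host fault PC back to its TB. */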

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
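
/* Note on the encoding: the low two bits of the pointers in the
   jmp_first/jmp_next chains are a tag, not part of the address.  Values 0
   and 1 name which of the two jump slots of the referencing TB the link
   came from; value 2 (set in tb_link_page() above) marks the list head,
   which is why the first loop above stops when n1 == 2. */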

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
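
/* Usage sketch (illustrative; flags follow gdbstub conventions, not a call
   made in this file):

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);

   BP_GDB keeps the entry ahead of CPU-internal watchpoints in the list;
   per the check above, len must be a power of two and addr aligned to it. */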

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
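
/* Usage sketch (illustrative): inserting a breakpoint invalidates any TB
   overlapping the target pc, so the next execution retranslates the block
   with the debug exception planted, e.g.

       cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
 */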

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
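
/* Note: under TCG, single-stepping works by retranslating each block to
   raise EXCP_DEBUG after one instruction, so every cached TB becomes stale
   when the mode toggles and must be flushed; under KVM the in-kernel guest
   debug support is updated instead via kvm_update_guest_debug(). */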

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}
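
/* Note: in icount mode, writing 0xffff to icount_decr.u16.high drives the
   combined 32-bit icount_decr.u32 value negative, so the TB exits to the
   main loop at its next decrement-and-test and the pending
   interrupt_request is serviced there; unchaining the current TB is
   unnecessary in that mode. */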

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       The memcpy above copied stale list head pointers, so reinitialize
       the new CPU's lists before cloning the entries from the source.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
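
/* Note: a TB starting on the page that precedes 'addr' can extend into the
   flushed page, so two bucket groups are cleared: the one hashed from addr
   and the one hashed from addr - TARGET_PAGE_SIZE.  Each group spans the
   TB_JMP_PAGE_SIZE consecutive cache slots used by TBs whose pc lies in
   that page. */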

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
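
/* Usage sketch (illustrative): display adapters rely on this for
   framebuffer update tracking; after redrawing a region the device model
   clears its VGA dirty bits, e.g.

       cpu_physical_memory_reset_dirty(start, start + size, VGA_DIRTY_FLAG);

   so subsequent guest writes take the dirty-tracking slow path again and
   re-mark the pages. */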

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_ulong vaddr,
                                                   target_phys_addr_t paddr,
                                                   int prot,
                                                   target_ulong *address)
{
    target_phys_addr_t iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
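
/* Note: the returned iotlb value is overloaded.  For RAM-backed pages it
   is a ram_addr_t (page base plus offset) tagged with the notdirty or rom
   section so that stores take the dirty-tracking path; for MMIO it is an
   index into phys_sections plus the offset within the section, which the
   I/O dispatch code decodes back into a MemoryRegionSection. */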

#else
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2077 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2078 3d97b40b ths
{
2079 3d97b40b ths
    PageDesc *p;
2080 3d97b40b ths
    target_ulong end;
2081 3d97b40b ths
    target_ulong addr;
2082 3d97b40b ths
2083 376a7909 Richard Henderson
    /* This function should never be called with addresses outside the
2084 376a7909 Richard Henderson
       guest address space.  If this assert fires, it probably indicates
2085 376a7909 Richard Henderson
       a missing call to h2g_valid.  */
2086 338e9e6c Blue Swirl
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2087 338e9e6c Blue Swirl
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2088 376a7909 Richard Henderson
#endif
2089 376a7909 Richard Henderson
2090 3e0650a9 Richard Henderson
    if (len == 0) {
2091 3e0650a9 Richard Henderson
        return 0;
2092 3e0650a9 Richard Henderson
    }
2093 376a7909 Richard Henderson
    if (start + len - 1 < start) {
2094 376a7909 Richard Henderson
        /* We've wrapped around.  */
2095 55f280c9 balrog
        return -1;
2096 376a7909 Richard Henderson
    }
2097 55f280c9 balrog
2098 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2099 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2100 3d97b40b ths
2101 376a7909 Richard Henderson
    for (addr = start, len = end - start;
2102 376a7909 Richard Henderson
         len != 0;
2103 376a7909 Richard Henderson
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2104 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2105 3d97b40b ths
        if( !p )
2106 3d97b40b ths
            return -1;
2107 3d97b40b ths
        if( !(p->flags & PAGE_VALID) )
2108 3d97b40b ths
            return -1;
2109 3d97b40b ths
2110 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2111 3d97b40b ths
            return -1;
2112 dae3270c bellard
        if (flags & PAGE_WRITE) {
2113 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2114 dae3270c bellard
                return -1;
2115 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2116 dae3270c bellard
               contains translated code */
2117 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2118 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2119 dae3270c bellard
                    return -1;
2120 dae3270c bellard
            }
2121 dae3270c bellard
            return 0;
2122 dae3270c bellard
        }
2123 3d97b40b ths
    }
2124 3d97b40b ths
    return 0;
2125 3d97b40b ths
}
2126 3d97b40b ths
2127 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2128 ccbb4d44 Stuart Brady
   page. Return TRUE if the fault was successfully handled. */
2129 6375e09e Stefan Weil
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
2130 9fa3e853 bellard
{
2131 45d679d6 Aurelien Jarno
    unsigned int prot;
2132 45d679d6 Aurelien Jarno
    PageDesc *p;
2133 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2134 9fa3e853 bellard
2135 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2136 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2137 c8a706fe pbrook
       practice it seems to be ok.  */
2138 c8a706fe pbrook
    mmap_lock();
2139 c8a706fe pbrook
2140 45d679d6 Aurelien Jarno
    p = page_find(address >> TARGET_PAGE_BITS);
2141 45d679d6 Aurelien Jarno
    if (!p) {
2142 c8a706fe pbrook
        mmap_unlock();
2143 9fa3e853 bellard
        return 0;
2144 c8a706fe pbrook
    }
2145 45d679d6 Aurelien Jarno
2146 9fa3e853 bellard
    /* if the page was really writable, then we change its
2147 9fa3e853 bellard
       protection back to writable */
2148 45d679d6 Aurelien Jarno
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2149 45d679d6 Aurelien Jarno
        host_start = address & qemu_host_page_mask;
2150 45d679d6 Aurelien Jarno
        host_end = host_start + qemu_host_page_size;
2151 45d679d6 Aurelien Jarno
2152 45d679d6 Aurelien Jarno
        prot = 0;
2153 45d679d6 Aurelien Jarno
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2154 45d679d6 Aurelien Jarno
            p = page_find(addr >> TARGET_PAGE_BITS);
2155 45d679d6 Aurelien Jarno
            p->flags |= PAGE_WRITE;
2156 45d679d6 Aurelien Jarno
            prot |= p->flags;
2157 45d679d6 Aurelien Jarno
2158 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2159 9fa3e853 bellard
               the corresponding translated code. */
2160 45d679d6 Aurelien Jarno
            tb_invalidate_phys_page(addr, pc, puc);
2161 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2162 45d679d6 Aurelien Jarno
            tb_invalidate_check(addr);
2163 9fa3e853 bellard
#endif
2164 9fa3e853 bellard
        }
2165 45d679d6 Aurelien Jarno
        mprotect((void *)g2h(host_start), qemu_host_page_size,
2166 45d679d6 Aurelien Jarno
                 prot & PAGE_BITS);
2167 45d679d6 Aurelien Jarno
2168 45d679d6 Aurelien Jarno
        mmap_unlock();
2169 45d679d6 Aurelien Jarno
        return 1;
2170 9fa3e853 bellard
    }
2171 c8a706fe pbrook
    mmap_unlock();
2172 9fa3e853 bellard
    return 0;
2173 9fa3e853 bellard
}
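
/* In user-mode emulation the caller is the host SEGV handler
   (handle_cpu_signal() in user-exec.c), roughly:

       if (is_write && h2g_valid(address)
           && page_unprotect(h2g(address), pc, puc)) {
           return 1;
       }

   On success the faulting write is simply restarted, so a guest write to
   a page made read-only only because it holds translated code never
   surfaces as a guest fault. */
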
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(&now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(&now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}
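
/* For example, a section starting at address 0x1000 with size 0x3800
   (4K target pages, offset_within_region page-aligned) is split by the
   code above into a multipage mapping for the page-aligned part
   0x1000..0x3fff and a subpage mapping for the 0x800-byte tail
   0x4000..0x47ff; an unaligned head would likewise be peeled off as a
   subpage first. */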


void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
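
/* This backend is selected with -mem-path; e.g. with hugetlbfs mounted
   on /dev/hugepages (mount point illustrative):

       qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages ...

   If the hugepage mapping cannot be set up, qemu_ram_alloc_from_ptr()
   below falls back to an ordinary qemu_vmalloc() allocation. */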

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
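
/* The scan above is a best-fit search: each block's end is paired with
   the closest following block start, and the smallest gap that still
   fits is chosen.  For instance, with blocks at [0, 0x1000) and
   [0x3000, 0x4000), a request for 0x1000 bytes returns offset 0x1000,
   reusing the 0x2000-byte gap between them. */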

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in
       the core dump */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
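
/* The option checked above comes from the command line, e.g.:

       qemu-system-x86_64 -machine pc,dump-guest-core=off ...

   which keeps guest RAM out of any core dump QEMU itself produces. */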

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
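
/* The resulting idstr is "<qdev-path>/<name>" when a device is given,
   or just "<name>" otherwise; a PCI VGA ROM, for instance, might end up
   as something like "0000:00:02.0/vga.rom" (the exact path depends on
   qdev_get_dev_path()). */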

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
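
/* Callers normally reach this through the memory API: at this point in
   the tree, memory_region_init_ram() allocates its backing storage
   roughly as

       mr->ram_addr = qemu_ram_alloc(size, mr);

   so board code only deals in MemoryRegions and never sees the
   ram_addr_t directly (see memory.c for the real call site). */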

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
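
/* For instance, a display device that owns its video RAM block may cache
   the pointer once (field names illustrative):

       s->vram_ptr = qemu_get_ram_ptr(s->vram_offset);

   whereas a device doing guest-directed DMA must go through
   cpu_physical_memory_map()/cpu_physical_memory_rw() instead, as the
   comment above says. */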

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
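
/* Outside of Xen the two directions are inverses of each other; for any
   valid offset:

       void *host = qemu_get_ram_ptr(offset);
       assert(qemu_ram_addr_from_host_nofail(host) == offset);

   (illustrative only; the real callers hold a host pointer taken from a
   TLB entry). */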

static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
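
/* These paths are armed via cpu_watchpoint_insert(); e.g. a 4-byte write
   watchpoint (sketch, error handling omitted):

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);

   The TLB entry covering the page is then redirected at io_mem_watch, so
   every access funnels through watch_mem_read/watch_mem_write above. */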

static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
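
/* Each subpage_t covers exactly one target page; sub_section[] maps every
   byte offset within that page to a section index.  For example, if a
   0x100-byte device region is registered at offset 0x800 of the page,
   entries sub_section[0x800..0x8ff] point at that device's section and
   the rest stay at phys_section_unassigned. */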

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}
3176 50c1e149 Avi Kivity
3177 50c1e149 Avi Kivity
static void core_commit(MemoryListener *listener)
3178 50c1e149 Avi Kivity
{
3179 9349b4f9 Andreas Färber
    CPUArchState *env;
3180 117712c3 Avi Kivity
3181 117712c3 Avi Kivity
    /* since each CPU stores ram addresses in its TLB cache, we must
3182 117712c3 Avi Kivity
       reset the modified entries */
3183 117712c3 Avi Kivity
    /* XXX: slow ! */
3184 117712c3 Avi Kivity
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
3185 117712c3 Avi Kivity
        tlb_flush(env, 1);
3186 117712c3 Avi Kivity
    }
3187 50c1e149 Avi Kivity
}
3188 50c1e149 Avi Kivity
3189 93632747 Avi Kivity
static void core_region_add(MemoryListener *listener,
3190 93632747 Avi Kivity
                            MemoryRegionSection *section)
3191 93632747 Avi Kivity
{
3192 4855d41a Avi Kivity
    cpu_register_physical_memory_log(section, section->readonly);
3193 93632747 Avi Kivity
}
3194 93632747 Avi Kivity
3195 93632747 Avi Kivity
static void core_region_del(MemoryListener *listener,
3196 93632747 Avi Kivity
                            MemoryRegionSection *section)
3197 93632747 Avi Kivity
{
3198 93632747 Avi Kivity
}
3199 93632747 Avi Kivity
3200 50c1e149 Avi Kivity
static void core_region_nop(MemoryListener *listener,
3201 50c1e149 Avi Kivity
                            MemoryRegionSection *section)
3202 50c1e149 Avi Kivity
{
3203 54688b1e Avi Kivity
    cpu_register_physical_memory_log(section, section->readonly);
3204 50c1e149 Avi Kivity
}
3205 50c1e149 Avi Kivity
3206 93632747 Avi Kivity
static void core_log_start(MemoryListener *listener,
3207 93632747 Avi Kivity
                           MemoryRegionSection *section)
3208 93632747 Avi Kivity
{
3209 93632747 Avi Kivity
}
3210 93632747 Avi Kivity
3211 93632747 Avi Kivity
static void core_log_stop(MemoryListener *listener,
3212 93632747 Avi Kivity
                          MemoryRegionSection *section)
3213 93632747 Avi Kivity
{
3214 93632747 Avi Kivity
}
3215 93632747 Avi Kivity
3216 93632747 Avi Kivity
static void core_log_sync(MemoryListener *listener,
3217 93632747 Avi Kivity
                          MemoryRegionSection *section)
3218 93632747 Avi Kivity
{
3219 93632747 Avi Kivity
}
3220 93632747 Avi Kivity
3221 93632747 Avi Kivity
static void core_log_global_start(MemoryListener *listener)
3222 93632747 Avi Kivity
{
3223 93632747 Avi Kivity
    cpu_physical_memory_set_dirty_tracking(1);
3224 93632747 Avi Kivity
}
3225 93632747 Avi Kivity
3226 93632747 Avi Kivity
static void core_log_global_stop(MemoryListener *listener)
3227 93632747 Avi Kivity
{
3228 93632747 Avi Kivity
    cpu_physical_memory_set_dirty_tracking(0);
3229 93632747 Avi Kivity
}
3230 93632747 Avi Kivity
3231 93632747 Avi Kivity
static void core_eventfd_add(MemoryListener *listener,
3232 93632747 Avi Kivity
                             MemoryRegionSection *section,
3233 753d5e14 Paolo Bonzini
                             bool match_data, uint64_t data, EventNotifier *e)
3234 93632747 Avi Kivity
{
3235 93632747 Avi Kivity
}
3236 93632747 Avi Kivity
3237 93632747 Avi Kivity
static void core_eventfd_del(MemoryListener *listener,
3238 93632747 Avi Kivity
                             MemoryRegionSection *section,
3239 753d5e14 Paolo Bonzini
                             bool match_data, uint64_t data, EventNotifier *e)
3240 93632747 Avi Kivity
{
3241 93632747 Avi Kivity
}

static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}
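
/* Editor's sketch (not part of exec.c): once memory_map_init() has run,
 * board code populates the system address space.  A minimal, assumed
 * board init could look like this; example_board_init() and `ram_size`
 * are hypothetical, while memory_region_init_ram() and
 * memory_region_add_subregion() are the memory API used elsewhere in
 * QEMU of this era. */
#if 0
static void example_board_init(ram_addr_t ram_size)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", ram_size);
    /* Mapping the region invokes core_region_add() above through
       core_memory_listener. */
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif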

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

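/* Editor's sketch (not part of exec.c): cpu_memory_rw_debug() is the
 * debugger path; a gdbstub-like caller reads guest virtual memory and
 * must handle the -1 error for unmapped pages.  `env` and `vaddr` are
 * assumed to come from the debug session. */
#if 0
static bool example_debug_read_u32(CPUArchState *env, target_ulong vaddr,
                                   uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;          /* page invalid or not readable */
    }
    *out = ldl_p(buf);         /* decode in target byte order */
    return true;
}
#endif
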
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
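
/* Editor's sketch (not part of exec.c): typical device-model use of the
 * slow path above.  The cpu_physical_memory_read()/write() helpers seen
 * elsewhere in this file are thin wrappers around cpu_physical_memory_rw().
 * `desc_pa` is a hypothetical guest-physical descriptor address. */
#if 0
static uint32_t example_read_desc_word(target_phys_addr_t desc_pa)
{
    uint8_t buf[4];

    /* is_write == 0: copy from guest RAM (or device I/O) into buf */
    cpu_physical_memory_rw(desc_pa, buf, sizeof(buf), 0);
    return ldl_p(buf);
}
#endif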

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
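
/* Editor's sketch (not part of exec.c): firmware loaders go through
 * cpu_physical_memory_write_rom() so the image lands even in regions the
 * guest sees as read-only.  `blob`, `blob_size` and `rom_base` are
 * hypothetical. */
#if 0
static void example_install_firmware(const uint8_t *blob, int blob_size,
                                     target_phys_addr_t rom_base)
{
    /* A plain cpu_physical_memory_write() would be silently dropped for
       ROM; the _rom variant bypasses the read-only check. */
    cpu_physical_memory_write_rom(rom_base, blob, blob_size);
}
#endif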

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
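
/* Editor's sketch (not part of exec.c): the map-client list implements a
 * retry protocol for cpu_physical_memory_map() below.  When the single
 * bounce buffer is busy, a device registers a callback and retries once
 * an unmap releases it.  ExampleDMAState and example_start_dma() are
 * hypothetical. */
#if 0
typedef struct ExampleDMAState {
    target_phys_addr_t addr;
    target_phys_addr_t len;
} ExampleDMAState;

static void example_start_dma(void *opaque)
{
    ExampleDMAState *s = opaque;
    target_phys_addr_t len = s->len;
    void *p = cpu_physical_memory_map(s->addr, &len, 1);

    if (!p) {
        /* Bounce buffer busy: ask to be called again from
           cpu_notify_map_clients(), then bail out. */
        cpu_register_map_client(s, example_start_dma);
        return;
    }
    /* ... fill up to len bytes at p ..., then: */
    cpu_physical_memory_unmap(p, len, 1, len);
}
#endif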

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
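
/* Editor's sketch (not part of exec.c): cpu_physical_memory_map() may
 * shorten *plen (it stops at the first non-RAM section, and the bounce
 * buffer covers at most one page), so robust callers loop until the whole
 * transfer is done.  All names below are hypothetical. */
#if 0
static void example_copy_from_guest(target_phys_addr_t addr, uint8_t *dst,
                                    target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t len = size;
        void *p = cpu_physical_memory_map(addr, &len, 0);

        if (!p) {
            break;             /* resources exhausted; see map clients */
        }
        memcpy(dst, p, len);
        cpu_physical_memory_unmap(p, len, 0, len);
        addr += len;
        dst += len;
        size -= len;
    }
}
#endif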

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
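
/* Editor's sketch (not part of exec.c): the _le/_be variants let device
 * models read guest-physical memory in the device's declared endianness,
 * independent of TARGET_WORDS_BIGENDIAN; ldl_phys_internal() byte-swaps
 * as needed.  `desc_pa` is hypothetical. */
#if 0
static uint32_t example_read_le_register(target_phys_addr_t desc_pa)
{
    /* A descriptor field defined as little-endian on the bus. */
    return ldl_le_phys(desc_pa);
}
#endif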

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
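
/* Editor's sketch (not part of exec.c): stl_phys_notdirty() suits target
 * code that emulates a hardware page-table walker.  Setting an
 * accessed/dirty bit in a guest PTE must not dirty the page itself, or
 * every walk would churn migration and TB invalidation.  The PTE layout
 * below is hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                       /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);  /* no dirty marking, no TB flush */
}
#endif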

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
4061 aab33094 bellard
/* XXX: optimize */
4062 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
4063 aab33094 bellard
{
4064 aab33094 bellard
    val = tswap64(val);
4065 71d2b725 Stefan Weil
    cpu_physical_memory_write(addr, &val, 8);
4066 aab33094 bellard
}
4067 aab33094 bellard
4068 1e78bcc1 Alexander Graf
void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4069 1e78bcc1 Alexander Graf
{
4070 1e78bcc1 Alexander Graf
    val = cpu_to_le64(val);
4071 1e78bcc1 Alexander Graf
    cpu_physical_memory_write(addr, &val, 8);
4072 1e78bcc1 Alexander Graf
}
4073 1e78bcc1 Alexander Graf
4074 1e78bcc1 Alexander Graf
void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4075 1e78bcc1 Alexander Graf
{
4076 1e78bcc1 Alexander Graf
    val = cpu_to_be64(val);
4077 1e78bcc1 Alexander Graf
    cpu_physical_memory_write(addr, &val, 8);
4078 1e78bcc1 Alexander Graf
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif
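
/* Editor's sketch (not part of exec.c): a guest-memory dumper can use
 * cpu_physical_memory_is_io() to skip device regions, where reads may
 * have side effects.  `start` and `size` are hypothetical. */
#if 0
static bool example_range_is_plain_memory(target_phys_addr_t start,
                                          target_phys_addr_t size)
{
    target_phys_addr_t addr;

    for (addr = start & TARGET_PAGE_MASK; addr < start + size;
         addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(addr)) {
            return false;
        }
    }
    return true;
}
#endif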