exec.c @ 09f1bbcd
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code handling, we count the
       number of lookups we do to a given page so that a bitmap can be used */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
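
/* Added note (illustrative): with TARGET_PHYS_ADDR_SPACE_BITS = 36 and
   4 KiB target pages (TARGET_PAGE_BITS = 12), 36 - 12 = 24 index bits
   remain, so P_L2_LEVELS = ((24 - 1) / 10) + 1 = 3 levels of 10 bits
   each (30 bits >= 24).  The rounding up guarantees enough levels for
   any address space that is not a whole multiple of L2_BITS.  */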

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
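
/* Added note (illustrative): assuming L1_MAP_ADDR_SPACE_BITS = 64 and
   4 KiB target pages (TARGET_PAGE_BITS = 12), 52 index bits remain;
   52 % 10 = 2, and since 2 < 4 the top level absorbs the remainder:
   V_L1_BITS = 12, V_L1_SIZE = 4096 entries, and V_L1_SHIFT = 40, i.e.
   four full 10-bit levels sit below the statically allocated l1_map.  */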

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
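
/* Added note: map_exec() exists because translated code must live in
   memory that is writable by the translator and executable by the
   host.  Since mprotect() requires page-aligned bounds, the Unix
   variant above rounds start down and end up to the host page size;
   e.g. (illustratively) addr = 0x12345, size = 0x100 with 4 KiB host
   pages becomes mprotect(0x12000, 0x1000, RWX).  */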

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
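
/* Added note (illustrative walk): with V_L1_SHIFT = 40 and L2_BITS = 10,
   looking up a page index uses bits [40..51] of the index to pick the
   l1_map slot, then bits [30..39], [20..29] and [10..19] for the
   intermediate tables, and the final (index & 0x3ff) selects the
   PageDesc inside the bottom-level block.  With alloc == 0 the walk
   aborts with NULL at the first missing table, which is what
   page_find() below relies on.  */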

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
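
/* Added note: phys_page_set_level() walks one radix level per call.
   At each level an entry covers 'step' pages (1 << (level * L2_BITS));
   when the remaining [index, index + nb) range is step-aligned and at
   least step pages long, the whole entry becomes a leaf pointing at
   the section, so a large homogeneous region is recorded without
   allocating the subtrees below it.  Otherwise it recurses one level
   down and then resumes with the next sibling entry.  */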

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
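
/* Added note: the lookup mirrors phys_page_set_level() -- it consumes
   L2_BITS of the page index per level and stops early either at a
   large leaf (is_leaf set above the bottom level) or at a NIL pointer,
   in which case the unassigned section is returned instead of NULL so
   callers never have to check for a missing mapping.  */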

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
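
/* Added note: tb_alloc() is a bump allocator over the tbs[] array and
   the code buffer; it never frees individual blocks.  When either
   limit above is hit it returns NULL, and the caller (tb_gen_code)
   responds with a full tb_flush(), which is why tb_free() below only
   bothers reclaiming the most recently generated TB.  */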

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
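
/* Added note: the list helpers below rely on pointer tagging -- the
   low two bits of a TranslationBlock pointer stored in page_next[] or
   the jmp lists encode which of the TB's two possible pages (0 or 1)
   or jump slots the link belongs to, and the value 2 marks the end of
   the circular jmp list.  Hence the recurring idiom
   n1 = (uintptr_t)tb1 & 3; tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
   untag first, then follow the right next pointer.  */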

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
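
/* Added worked example: set_bits(tab, 5, 7) marks bits 5..11.  The
   range straddles a byte boundary, so the first byte gets
   mask = 0xff << 5 = 0xe0 (bits 5-7), no full 0xff bytes follow, and
   the tail byte gets ~(0xff << (12 & 7)) = 0x0f (bits 8-11).  */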

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
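
/* Added note: the bitmap holds one bit per byte of the page
   (TARGET_PAGE_SIZE / 8 bytes), set for every byte covered by a
   translated block.  It is only built once a page has crossed
   SMC_BITMAP_USE_THRESHOLD write-invalidations (see
   tb_invalidate_phys_page_range below), letting the self-modifying
   code write path test cheaply whether a store actually hits
   translated code.  */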

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
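
/* Added note: a TB whose guest code crosses a page boundary is linked
   on both physical pages (phys_pc and phys_page2), so a write to
   either page invalidates it; single-page TBs keep page_addr[1] == -1.
   code_gen_ptr is rounded up to CODE_GEN_ALIGN after each block so the
   next TB starts on an aligned host address.  */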
1077 3b46e624 ths
1078 77a8f1a5 Alexander Graf
/*
1079 77a8f1a5 Alexander Graf
 * invalidate all TBs which intersect with the target physical pages
1080 77a8f1a5 Alexander Graf
 * starting in range [start;end[. NOTE: start and end may refer to
1081 77a8f1a5 Alexander Graf
 * different physical pages. 'is_cpu_write_access' should be true if called
1082 77a8f1a5 Alexander Graf
 * from a real cpu write access: the virtual CPU will exit the current
1083 77a8f1a5 Alexander Graf
 * TB if code is modified inside this TB.
1084 77a8f1a5 Alexander Graf
 */
1085 77a8f1a5 Alexander Graf
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1086 77a8f1a5 Alexander Graf
                              int is_cpu_write_access)
1087 77a8f1a5 Alexander Graf
{
1088 77a8f1a5 Alexander Graf
    while (start < end) {
1089 77a8f1a5 Alexander Graf
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1090 77a8f1a5 Alexander Graf
        start &= TARGET_PAGE_MASK;
1091 77a8f1a5 Alexander Graf
        start += TARGET_PAGE_SIZE;
1092 77a8f1a5 Alexander Graf
    }
1093 77a8f1a5 Alexander Graf
}
1094 77a8f1a5 Alexander Graf
1095 9fa3e853 bellard
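/* Implementation note: the per-page TB lists linked through
   tb->page_next[] carry a tag in the low two bits of each pointer:
   it selects which of the (up to) two pages of the pointed-to TB the
   list belongs to.  That is why the loop below masks with "& 3" and
   "& ~3" while walking p->first_tb. */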
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

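/* Fast path for small write accesses: if the page has a code_bitmap
   (one bit per byte of translated code, built lazily by
   build_page_bitmap()), only fall back to the full page-range
   invalidation when the written bytes actually overlap translated
   code. */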
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

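/* Two schemes keep translated code coherent with guest memory: in
   user mode the host page is mprotect()ed read-only so that a guest
   write faults and reaches page_unprotect(); with softmmu,
   tlb_protect_code() instead forces writes to that page through the
   slow path, which calls the invalidation routines above. */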
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

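/* Note: tbs[] is filled in allocation order and code_gen_ptr only
   grows between flushes, so the tc_ptr values are sorted in
   increasing order; this is what makes the binary search below
   valid. */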
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

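/* Each TB keeps a circular list of the TBs that jump to it, threaded
   through jmp_first/jmp_next[].  The low two bits of every link
   encode which of the pointing TB's two jump slots is used; the
   special value 2 tags the list head, i.e. the TB itself (see
   tb_link_page() above). */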
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
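/* Watchpoints are stored as (vaddr, len_mask) pairs: only power-of-2
   lengths with a matching alignment are accepted, so hit detection
   reduces to a mask-and-compare of the access address against
   wp->vaddr. */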
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
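/* With icount, interrupts must be taken at a deterministic
   instruction boundary: writing 0xffff to the high half of
   icount_decr makes the 32-bit counter negative, so the running TB
   stops at its next icount check instead of being unlinked
   asynchronously. */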
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

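/* Note: the format arguments are consumed twice below (once for
   stderr, once for the log file), so the va_list must be va_copy()ed
   before the first vfprintf(). */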
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    /* Reset the new CPU's lists (the memcpy above duplicated the old
       list heads) before cloning from the source CPU's lists.  */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

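/* Note on the returned iotlb encoding: for RAM it is the ram_addr_t
   of the page with the phys_section_notdirty/phys_section_rom flags
   OR'ed in; for I/O it is an index into phys_sections plus the
   offset within the section.  The TLB fill code decodes it
   accordingly. */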
target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_ulong vaddr,
                                                   target_phys_addr_t paddr,
                                                   int prot,
                                                   target_ulong *address)
{
    target_phys_addr_t iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}

#else
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

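/* The walk coalesces runs of identically-protected pages: data.start
   and data.prot track the current run and walk_memory_regions_end()
   reports a finished region to the callback whenever the protection
   changes. */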
struct walk_memory_regions_data
2032 5cd2c5b6 Richard Henderson
{
2033 5cd2c5b6 Richard Henderson
    walk_memory_regions_fn fn;
2034 5cd2c5b6 Richard Henderson
    void *priv;
2035 8efe0ca8 Stefan Weil
    uintptr_t start;
2036 5cd2c5b6 Richard Henderson
    int prot;
2037 5cd2c5b6 Richard Henderson
};
2038 5cd2c5b6 Richard Henderson
2039 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2040 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2041 5cd2c5b6 Richard Henderson
{
2042 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2043 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2044 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2045 5cd2c5b6 Richard Henderson
            return rc;
2046 5cd2c5b6 Richard Henderson
        }
2047 5cd2c5b6 Richard Henderson
    }
2048 5cd2c5b6 Richard Henderson
2049 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2050 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2051 5cd2c5b6 Richard Henderson
2052 5cd2c5b6 Richard Henderson
    return 0;
2053 5cd2c5b6 Richard Henderson
}
2054 5cd2c5b6 Richard Henderson
2055 5cd2c5b6 Richard Henderson
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2056 b480d9b7 Paul Brook
                                 abi_ulong base, int level, void **lp)
2057 5cd2c5b6 Richard Henderson
{
2058 b480d9b7 Paul Brook
    abi_ulong pa;
2059 5cd2c5b6 Richard Henderson
    int i, rc;
2060 5cd2c5b6 Richard Henderson
2061 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
2062 5cd2c5b6 Richard Henderson
        return walk_memory_regions_end(data, base, 0);
2063 5cd2c5b6 Richard Henderson
    }
2064 5cd2c5b6 Richard Henderson
2065 5cd2c5b6 Richard Henderson
    if (level == 0) {
2066 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
2067 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2068 5cd2c5b6 Richard Henderson
            int prot = pd[i].flags;
2069 5cd2c5b6 Richard Henderson
2070 5cd2c5b6 Richard Henderson
            pa = base | (i << TARGET_PAGE_BITS);
2071 5cd2c5b6 Richard Henderson
            if (prot != data->prot) {
2072 5cd2c5b6 Richard Henderson
                rc = walk_memory_regions_end(data, pa, prot);
2073 5cd2c5b6 Richard Henderson
                if (rc != 0) {
2074 5cd2c5b6 Richard Henderson
                    return rc;
2075 9fa3e853 bellard
                }
2076 9fa3e853 bellard
            }
2077 5cd2c5b6 Richard Henderson
        }
2078 5cd2c5b6 Richard Henderson
    } else {
2079 5cd2c5b6 Richard Henderson
        void **pp = *lp;
2080 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2081 b480d9b7 Paul Brook
            pa = base | ((abi_ulong)i <<
2082 b480d9b7 Paul Brook
                (TARGET_PAGE_BITS + L2_BITS * level));
2083 5cd2c5b6 Richard Henderson
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2084 5cd2c5b6 Richard Henderson
            if (rc != 0) {
2085 5cd2c5b6 Richard Henderson
                return rc;
2086 5cd2c5b6 Richard Henderson
            }
2087 5cd2c5b6 Richard Henderson
        }
2088 5cd2c5b6 Richard Henderson
    }
2089 5cd2c5b6 Richard Henderson
2090 5cd2c5b6 Richard Henderson
    return 0;
2091 5cd2c5b6 Richard Henderson
}
2092 5cd2c5b6 Richard Henderson
2093 5cd2c5b6 Richard Henderson
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2094 5cd2c5b6 Richard Henderson
{
2095 5cd2c5b6 Richard Henderson
    struct walk_memory_regions_data data;
2096 8efe0ca8 Stefan Weil
    uintptr_t i;
2097 5cd2c5b6 Richard Henderson
2098 5cd2c5b6 Richard Henderson
    data.fn = fn;
2099 5cd2c5b6 Richard Henderson
    data.priv = priv;
2100 5cd2c5b6 Richard Henderson
    data.start = -1ul;
2101 5cd2c5b6 Richard Henderson
    data.prot = 0;
2102 5cd2c5b6 Richard Henderson
2103 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
2104 b480d9b7 Paul Brook
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2105 5cd2c5b6 Richard Henderson
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2106 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2107 5cd2c5b6 Richard Henderson
            return rc;
2108 9fa3e853 bellard
        }
2109 33417e70 bellard
    }
2110 5cd2c5b6 Richard Henderson
2111 5cd2c5b6 Richard Henderson
    return walk_memory_regions_end(&data, 0, 0);
2112 edf8e2af Mika Westerberg
}
2113 edf8e2af Mika Westerberg
2114 b480d9b7 Paul Brook
static int dump_region(void *priv, abi_ulong start,
2115 b480d9b7 Paul Brook
    abi_ulong end, unsigned long prot)
2116 edf8e2af Mika Westerberg
{
2117 edf8e2af Mika Westerberg
    FILE *f = (FILE *)priv;
2118 edf8e2af Mika Westerberg
2119 b480d9b7 Paul Brook
    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2120 b480d9b7 Paul Brook
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
2121 edf8e2af Mika Westerberg
        start, end, end - start,
2122 edf8e2af Mika Westerberg
        ((prot & PAGE_READ) ? 'r' : '-'),
2123 edf8e2af Mika Westerberg
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2124 edf8e2af Mika Westerberg
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2125 edf8e2af Mika Westerberg
2126 edf8e2af Mika Westerberg
    return (0);
2127 edf8e2af Mika Westerberg
}
2128 edf8e2af Mika Westerberg
2129 edf8e2af Mika Westerberg
/* dump memory mappings */
2130 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2131 edf8e2af Mika Westerberg
{
2132 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2133 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2134 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2135 33417e70 bellard
}
2136 33417e70 bellard
2137 53a5960a pbrook
int page_get_flags(target_ulong address)
2138 33417e70 bellard
{
2139 9fa3e853 bellard
    PageDesc *p;
2140 9fa3e853 bellard
2141 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2142 33417e70 bellard
    if (!p)
2143 9fa3e853 bellard
        return 0;
2144 9fa3e853 bellard
    return p->flags;
2145 9fa3e853 bellard
}
2146 9fa3e853 bellard
2147 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

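/* Illustrative sketch (not part of the build): a target mmap path would
 * typically mark a freshly mapped guest range like this; the range and
 * flag combination below are hypothetical.
 *
 *     mmap_lock();
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_READ | PAGE_WRITE);
 *     mmap_unlock();
 *
 * Because PAGE_WRITE is present, PAGE_WRITE_ORG is recorded too, so a later
 * write-protect of the page (to guard translated code) can be undone by
 * page_unprotect().
 */
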
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

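/* Illustrative sketch (not part of the build): validating a hypothetical
 * guest buffer before touching it, e.g. from a syscall helper:
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 *
 * Note that the write check goes through PAGE_WRITE_ORG: a page that was
 * made read-only because it holds translated code still counts as writable
 * and is unprotected on the fly.
 */
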
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
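/* Illustrative sketch (not part of the build): the host SIGSEGV handler
 * (handle_cpu_signal() in user-exec.c at this point in time) recovers from
 * a write to a write-protected code page along these lines; the variable
 * names are hypothetical.
 *
 *     if (is_write && page_unprotect(h2g(fault_addr), pc, puc)) {
 *         return 1;   // handled: restart the faulting instruction
 *     }
 *
 * page_unprotect() restores PAGE_WRITE on the whole host page and discards
 * any translated blocks that overlapped it.
 */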
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

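/* Illustrative sketch (not part of the build): SUBPAGE_IDX() keeps only the
 * offset bits within one target page, so every byte of the page indexes one
 * slot of sub_section[].  With 4 KiB target pages, for example:
 *
 *     SUBPAGE_IDX(0x1234) == 0x234
 *
 * and a subpage covering 0x1000..0x1fff dispatches an access at 0x1234 via
 * phys_sections[mmio->sub_section[0x234]].
 */
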
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

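/* Illustrative sketch (not part of the build): phys_section_add() is the
 * usual amortized-doubling append; the allocation grows 16, 32, 64, ...,
 * so n additions cost O(n) copies overall.  Callers keep the returned
 * index, not a pointer, because the array may move on reallocation:
 *
 *     uint16_t idx = phys_section_add(&section);
 *     MemoryRegionSection *s = &phys_sections[idx];   // re-fetch after adds
 */
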
/* register physical memory.
   For RAM, the section size must be a multiple of the target page size.
   Sections that are not page-aligned, or that are smaller than a page,
   are broken up and dispatched through a subpage; the address passed to
   the IO callbacks is then the offset from the start of the underlying
   region.  */
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    now.size &= TARGET_PAGE_MASK;
    if (now.size) {
        register_multipage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}

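/* Illustrative sketch (not part of the build): the head/middle/tail split
 * above, worked through for a hypothetical section at 0x1200 with size
 * 0x3000 and 4 KiB target pages:
 *
 *     head:   0x1200..0x1fff  (0x0e00 bytes)  -> register_subpage()
 *     middle: 0x2000..0x3fff  (0x2000 bytes)  -> register_multipage()
 *     tail:   0x4000..0x41ff  (0x0200 bytes)  -> register_subpage()
 *
 * Only the unaligned fragments pay the dispatch cost of a subpage.
 */
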
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE does not reliably allocate all physical pages when
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

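/* Illustrative sketch (not part of the build): the hugepage rounding in
 * file_ram_alloc() is the standard align-up idiom.  With a hypothetical
 * 2 MiB huge page size:
 *
 *     memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
 *     // 0x1fffff -> 0x200000,  0x200000 -> 0x200000,  0x200001 -> 0x400000
 *
 * This only works because hpagesize is a power of two, so ~(hpagesize - 1)
 * masks off exactly the low bits.
 */
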
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

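/* Illustrative sketch (not part of the build): find_ram_offset() is a
 * best-fit scan over the gaps between existing blocks.  With hypothetical
 * blocks at [0x0, 0x1000) and [0x3000, 0x4000), a request for 0x1000 bytes
 * sees two candidate gaps:
 *
 *     after block 0: 0x1000..0x3000   (gap 0x2000)
 *     after block 1: 0x4000..         (gap unbounded)
 *
 * and returns 0x1000, the end of the block with the smallest gap that still
 * fits.  The scan is O(n^2) in the number of blocks, which is fine because
 * RAM blocks are few and allocation is rare.
 */
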
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

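/* Illustrative sketch (not part of the build): the resulting idstr is the
 * device's qdev path, if there is one, followed by the caller's name, e.g.
 * something like "0000:00:02.0/vga.vram" for a hypothetical PCI device,
 * while device-less blocks keep the bare name, e.g. "pc.ram".  The
 * duplicate check above matters because migration matches RAM blocks
 * between source and destination by this string.
 */
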
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller
               than a system-defined value, which is at least 256GB.
               Larger systems have larger values.  We put the guest between
               the end of the data segment (system break) and this value.
               We use 32GB as a base to have enough room for the system
               break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

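/* Illustrative sketch (not part of the build): a board model allocates its
 * guest RAM roughly like this; the region name and size are hypothetical.
 *
 *     MemoryRegion *ram = g_malloc0(sizeof(*ram));
 *     memory_region_init_ram(ram, "board.ram", 128 * 1024 * 1024);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 *
 * memory_region_init_ram() ends up in qemu_ram_alloc(); the returned
 * ram_addr_t identifies the block in ram_list, and every page starts out
 * dirty (0xff) so that a first migration pass sends it.
 */
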
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

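/* Illustrative sketch (not part of the build): the two translations are
 * inverses for any mapped block, so the round trip holds:
 *
 *     void *host = qemu_get_ram_ptr(offset);
 *     ram_addr_t back = qemu_ram_addr_from_host_nofail(host);
 *     assert(back == offset);
 *
 * Both directions are linear scans of ram_list; qemu_get_ram_ptr()'s
 * move-to-front keeps hot blocks at the head, and callers that must not
 * disturb the ordering use qemu_safe_ram_ptr() instead.
 */
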
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

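/* Illustrative sketch (not part of the build): the per-page dirty byte is a
 * small bitmask; the flag values below are from cpu-all.h at this point in
 * time and may differ.
 *
 *     VGA_DIRTY_FLAG        0x01   // display may need a redraw
 *     CODE_DIRTY_FLAG       0x02   // no translated code left on the page
 *     MIGRATION_DIRTY_FLAG  0x08   // page must be resent
 *
 * notdirty_mem_write() sets every bit except CODE_DIRTY_FLAG, which only
 * appears once tb_invalidate_phys_page_fast() has flushed the page's
 * translations; when the byte reaches 0xff the slow path is no longer
 * needed and the TLB entry reverts to plain RAM.
 */
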
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3118 70c68e44 Avi Kivity
                             unsigned len)
3119 db7b5426 blueswir1
{
3120 70c68e44 Avi Kivity
    subpage_t *mmio = opaque;
3121 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3122 5312bd8b Avi Kivity
    MemoryRegionSection *section;
3123 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3124 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3125 db7b5426 blueswir1
           mmio, len, addr, idx);
3126 db7b5426 blueswir1
#endif
3127 db7b5426 blueswir1
3128 5312bd8b Avi Kivity
    section = &phys_sections[mmio->sub_section[idx]];
3129 5312bd8b Avi Kivity
    addr += mmio->base;
3130 5312bd8b Avi Kivity
    addr -= section->offset_within_address_space;
3131 5312bd8b Avi Kivity
    addr += section->offset_within_region;
3132 37ec01d4 Avi Kivity
    return io_mem_read(section->mr, addr, len);
3133 db7b5426 blueswir1
}
3134 db7b5426 blueswir1
3135 70c68e44 Avi Kivity
static void subpage_write(void *opaque, target_phys_addr_t addr,
3136 70c68e44 Avi Kivity
                          uint64_t value, unsigned len)
3137 db7b5426 blueswir1
{
3138 70c68e44 Avi Kivity
    subpage_t *mmio = opaque;
3139 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3140 5312bd8b Avi Kivity
    MemoryRegionSection *section;
3141 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3142 70c68e44 Avi Kivity
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3143 70c68e44 Avi Kivity
           " idx %d value %"PRIx64"\n",
3144 f6405247 Richard Henderson
           __func__, mmio, len, addr, idx, value);
3145 db7b5426 blueswir1
#endif
3146 f6405247 Richard Henderson
3147 5312bd8b Avi Kivity
    section = &phys_sections[mmio->sub_section[idx]];
3148 5312bd8b Avi Kivity
    addr += mmio->base;
3149 5312bd8b Avi Kivity
    addr -= section->offset_within_address_space;
3150 5312bd8b Avi Kivity
    addr += section->offset_within_region;
3151 37ec01d4 Avi Kivity
    io_mem_write(section->mr, addr, value, len);
3152 db7b5426 blueswir1
}
3153 db7b5426 blueswir1
3154 70c68e44 Avi Kivity
static const MemoryRegionOps subpage_ops = {
3155 70c68e44 Avi Kivity
    .read = subpage_read,
3156 70c68e44 Avi Kivity
    .write = subpage_write,
3157 70c68e44 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
3158 db7b5426 blueswir1
};
3159 db7b5426 blueswir1
3160 de712f94 Avi Kivity
static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3161 de712f94 Avi Kivity
                                 unsigned size)
3162 56384e8b Andreas Färber
{
3163 56384e8b Andreas Färber
    ram_addr_t raddr = addr;
3164 56384e8b Andreas Färber
    void *ptr = qemu_get_ram_ptr(raddr);
3165 de712f94 Avi Kivity
    switch (size) {
3166 de712f94 Avi Kivity
    case 1: return ldub_p(ptr);
3167 de712f94 Avi Kivity
    case 2: return lduw_p(ptr);
3168 de712f94 Avi Kivity
    case 4: return ldl_p(ptr);
3169 de712f94 Avi Kivity
    default: abort();
3170 de712f94 Avi Kivity
    }
3171 56384e8b Andreas Färber
}
3172 56384e8b Andreas Färber
3173 de712f94 Avi Kivity
static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3174 de712f94 Avi Kivity
                              uint64_t value, unsigned size)
3175 56384e8b Andreas Färber
{
3176 56384e8b Andreas Färber
    ram_addr_t raddr = addr;
3177 56384e8b Andreas Färber
    void *ptr = qemu_get_ram_ptr(raddr);
3178 de712f94 Avi Kivity
    switch (size) {
3179 de712f94 Avi Kivity
    case 1: return stb_p(ptr, value);
3180 de712f94 Avi Kivity
    case 2: return stw_p(ptr, value);
3181 de712f94 Avi Kivity
    case 4: return stl_p(ptr, value);
3182 de712f94 Avi Kivity
    default: abort();
3183 de712f94 Avi Kivity
    }
3184 56384e8b Andreas Färber
}
3185 56384e8b Andreas Färber
3186 de712f94 Avi Kivity
static const MemoryRegionOps subpage_ram_ops = {
3187 de712f94 Avi Kivity
    .read = subpage_ram_read,
3188 de712f94 Avi Kivity
    .write = subpage_ram_write,
3189 de712f94 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
3190 56384e8b Andreas Färber
};
3191 56384e8b Andreas Färber
3192 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3193 5312bd8b Avi Kivity
                             uint16_t section)
3194 db7b5426 blueswir1
{
3195 db7b5426 blueswir1
    int idx, eidx;
3196 db7b5426 blueswir1
3197 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3198 db7b5426 blueswir1
        return -1;
3199 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
3200 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
3201 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3202 0bf9e31a Blue Swirl
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3203 db7b5426 blueswir1
           mmio, start, end, idx, eidx, section);
3204 db7b5426 blueswir1
#endif
3205 5312bd8b Avi Kivity
    if (memory_region_is_ram(phys_sections[section].mr)) {
3206 5312bd8b Avi Kivity
        MemoryRegionSection new_section = phys_sections[section];
3207 5312bd8b Avi Kivity
        new_section.mr = &io_mem_subpage_ram;
3208 5312bd8b Avi Kivity
        section = phys_section_add(&new_section);
3209 56384e8b Andreas Färber
    }
3210 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
3211 5312bd8b Avi Kivity
        mmio->sub_section[idx] = section;
3212 db7b5426 blueswir1
    }
3213 db7b5426 blueswir1
3214 db7b5426 blueswir1
    return 0;
3215 db7b5426 blueswir1
}
3216 db7b5426 blueswir1
3217 0f0cb164 Avi Kivity
static subpage_t *subpage_init(target_phys_addr_t base)
3218 db7b5426 blueswir1
{
3219 c227f099 Anthony Liguori
    subpage_t *mmio;
3220 db7b5426 blueswir1
3221 7267c094 Anthony Liguori
    mmio = g_malloc0(sizeof(subpage_t));
3222 1eec614b aliguori
3223 1eec614b aliguori
    mmio->base = base;
3224 70c68e44 Avi Kivity
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3225 70c68e44 Avi Kivity
                          "subpage", TARGET_PAGE_SIZE);
3226 b3b00c78 Avi Kivity
    mmio->iomem.subpage = true;
3227 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3228 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3229 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE);
3230 db7b5426 blueswir1
#endif
3231 0f0cb164 Avi Kivity
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
3232 db7b5426 blueswir1
3233 db7b5426 blueswir1
    return mmio;
3234 db7b5426 blueswir1
}
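/* Illustrative sketch, not part of the original file: how the subpage
   machinery above would carve a single target page into two backing
   sections.  The section indices are hypothetical values as returned by
   phys_section_add(); only the call pattern is meaningful. */
#if 0
static void subpage_split_example(target_phys_addr_t page_base,
                                  uint16_t mmio_section,
                                  uint16_t ram_section)
{
    subpage_t *sp = subpage_init(page_base);

    /* first half of the page dispatches to an MMIO section */
    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, mmio_section);
    /* second half is RAM-backed; subpage_register() redirects it to
       io_mem_subpage_ram internally */
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1,
                     ram_section);
}
#endif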
3235 db7b5426 blueswir1
3236 5312bd8b Avi Kivity
static uint16_t dummy_section(MemoryRegion *mr)
3237 5312bd8b Avi Kivity
{
3238 5312bd8b Avi Kivity
    MemoryRegionSection section = {
3239 5312bd8b Avi Kivity
        .mr = mr,
3240 5312bd8b Avi Kivity
        .offset_within_address_space = 0,
3241 5312bd8b Avi Kivity
        .offset_within_region = 0,
3242 5312bd8b Avi Kivity
        .size = UINT64_MAX,
3243 5312bd8b Avi Kivity
    };
3244 5312bd8b Avi Kivity
3245 5312bd8b Avi Kivity
    return phys_section_add(&section);
3246 5312bd8b Avi Kivity
}
3247 5312bd8b Avi Kivity
3248 37ec01d4 Avi Kivity
MemoryRegion *iotlb_to_region(target_phys_addr_t index)
3249 aa102231 Avi Kivity
{
3250 37ec01d4 Avi Kivity
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
3251 aa102231 Avi Kivity
}
3252 aa102231 Avi Kivity
3253 e9179ce1 Avi Kivity
static void io_mem_init(void)
3254 e9179ce1 Avi Kivity
{
3255 0e0df1e2 Avi Kivity
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3256 0e0df1e2 Avi Kivity
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3257 0e0df1e2 Avi Kivity
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3258 0e0df1e2 Avi Kivity
                          "unassigned", UINT64_MAX);
3259 0e0df1e2 Avi Kivity
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3260 0e0df1e2 Avi Kivity
                          "notdirty", UINT64_MAX);
3261 de712f94 Avi Kivity
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3262 de712f94 Avi Kivity
                          "subpage-ram", UINT64_MAX);
3263 1ec9b909 Avi Kivity
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3264 1ec9b909 Avi Kivity
                          "watch", UINT64_MAX);
3265 e9179ce1 Avi Kivity
}
3266 e9179ce1 Avi Kivity
3267 50c1e149 Avi Kivity
static void core_begin(MemoryListener *listener)
3268 50c1e149 Avi Kivity
{
3269 54688b1e Avi Kivity
    destroy_all_mappings();
3270 5312bd8b Avi Kivity
    phys_sections_clear();
3271 c19e8800 Avi Kivity
    phys_map.ptr = PHYS_MAP_NODE_NIL;
3272 5312bd8b Avi Kivity
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
3273 aa102231 Avi Kivity
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
3274 aa102231 Avi Kivity
    phys_section_rom = dummy_section(&io_mem_rom);
3275 aa102231 Avi Kivity
    phys_section_watch = dummy_section(&io_mem_watch);
3276 50c1e149 Avi Kivity
}
3277 50c1e149 Avi Kivity
3278 50c1e149 Avi Kivity
static void core_commit(MemoryListener *listener)
3279 50c1e149 Avi Kivity
{
3280 9349b4f9 Andreas Färber
    CPUArchState *env;
3281 117712c3 Avi Kivity
3282 117712c3 Avi Kivity
    /* since each CPU stores ram addresses in its TLB cache, we must
3283 117712c3 Avi Kivity
       reset the modified entries */
3284 117712c3 Avi Kivity
    /* XXX: slow ! */
3285 117712c3 Avi Kivity
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
3286 117712c3 Avi Kivity
        tlb_flush(env, 1);
3287 117712c3 Avi Kivity
    }
3288 50c1e149 Avi Kivity
}
3289 50c1e149 Avi Kivity
3290 93632747 Avi Kivity
static void core_region_add(MemoryListener *listener,
3291 93632747 Avi Kivity
                            MemoryRegionSection *section)
3292 93632747 Avi Kivity
{
3293 4855d41a Avi Kivity
    cpu_register_physical_memory_log(section, section->readonly);
3294 93632747 Avi Kivity
}
3295 93632747 Avi Kivity
3296 93632747 Avi Kivity
static void core_region_del(MemoryListener *listener,
3297 93632747 Avi Kivity
                            MemoryRegionSection *section)
3298 93632747 Avi Kivity
{
3299 93632747 Avi Kivity
}
3300 93632747 Avi Kivity
3301 50c1e149 Avi Kivity
static void core_region_nop(MemoryListener *listener,
3302 50c1e149 Avi Kivity
                            MemoryRegionSection *section)
3303 50c1e149 Avi Kivity
{
3304 54688b1e Avi Kivity
    cpu_register_physical_memory_log(section, section->readonly);
3305 50c1e149 Avi Kivity
}
3306 50c1e149 Avi Kivity
3307 93632747 Avi Kivity
static void core_log_start(MemoryListener *listener,
3308 93632747 Avi Kivity
                           MemoryRegionSection *section)
3309 93632747 Avi Kivity
{
3310 93632747 Avi Kivity
}
3311 93632747 Avi Kivity
3312 93632747 Avi Kivity
static void core_log_stop(MemoryListener *listener,
3313 93632747 Avi Kivity
                          MemoryRegionSection *section)
3314 93632747 Avi Kivity
{
3315 93632747 Avi Kivity
}
3316 93632747 Avi Kivity
3317 93632747 Avi Kivity
static void core_log_sync(MemoryListener *listener,
3318 93632747 Avi Kivity
                          MemoryRegionSection *section)
3319 93632747 Avi Kivity
{
3320 93632747 Avi Kivity
}
3321 93632747 Avi Kivity
3322 93632747 Avi Kivity
static void core_log_global_start(MemoryListener *listener)
3323 93632747 Avi Kivity
{
3324 93632747 Avi Kivity
    cpu_physical_memory_set_dirty_tracking(1);
3325 93632747 Avi Kivity
}
3326 93632747 Avi Kivity
3327 93632747 Avi Kivity
static void core_log_global_stop(MemoryListener *listener)
3328 93632747 Avi Kivity
{
3329 93632747 Avi Kivity
    cpu_physical_memory_set_dirty_tracking(0);
3330 93632747 Avi Kivity
}
3331 93632747 Avi Kivity
3332 93632747 Avi Kivity
static void core_eventfd_add(MemoryListener *listener,
3333 93632747 Avi Kivity
                             MemoryRegionSection *section,
3334 93632747 Avi Kivity
                             bool match_data, uint64_t data, int fd)
3335 93632747 Avi Kivity
{
3336 93632747 Avi Kivity
}
3337 93632747 Avi Kivity
3338 93632747 Avi Kivity
static void core_eventfd_del(MemoryListener *listener,
3339 93632747 Avi Kivity
                             MemoryRegionSection *section,
3340 93632747 Avi Kivity
                             bool match_data, uint64_t data, int fd)
3341 93632747 Avi Kivity
{
3342 93632747 Avi Kivity
}
3343 93632747 Avi Kivity
3344 50c1e149 Avi Kivity
static void io_begin(MemoryListener *listener)
3345 50c1e149 Avi Kivity
{
3346 50c1e149 Avi Kivity
}
3347 50c1e149 Avi Kivity
3348 50c1e149 Avi Kivity
static void io_commit(MemoryListener *listener)
3349 50c1e149 Avi Kivity
{
3350 50c1e149 Avi Kivity
}
3351 50c1e149 Avi Kivity
3352 4855d41a Avi Kivity
static void io_region_add(MemoryListener *listener,
3353 4855d41a Avi Kivity
                          MemoryRegionSection *section)
3354 4855d41a Avi Kivity
{
3355 a2d33521 Avi Kivity
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3356 a2d33521 Avi Kivity
3357 a2d33521 Avi Kivity
    mrio->mr = section->mr;
3358 a2d33521 Avi Kivity
    mrio->offset = section->offset_within_region;
3359 a2d33521 Avi Kivity
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
3360 4855d41a Avi Kivity
                 section->offset_within_address_space, section->size);
3361 a2d33521 Avi Kivity
    ioport_register(&mrio->iorange);
3362 4855d41a Avi Kivity
}
3363 4855d41a Avi Kivity
3364 4855d41a Avi Kivity
static void io_region_del(MemoryListener *listener,
3365 4855d41a Avi Kivity
                          MemoryRegionSection *section)
3366 4855d41a Avi Kivity
{
3367 4855d41a Avi Kivity
    isa_unassign_ioport(section->offset_within_address_space, section->size);
3368 4855d41a Avi Kivity
}
3369 4855d41a Avi Kivity
3370 50c1e149 Avi Kivity
static void io_region_nop(MemoryListener *listener,
3371 50c1e149 Avi Kivity
                          MemoryRegionSection *section)
3372 50c1e149 Avi Kivity
{
3373 50c1e149 Avi Kivity
}
3374 50c1e149 Avi Kivity
3375 4855d41a Avi Kivity
static void io_log_start(MemoryListener *listener,
3376 4855d41a Avi Kivity
                         MemoryRegionSection *section)
3377 4855d41a Avi Kivity
{
3378 4855d41a Avi Kivity
}
3379 4855d41a Avi Kivity
3380 4855d41a Avi Kivity
static void io_log_stop(MemoryListener *listener,
3381 4855d41a Avi Kivity
                        MemoryRegionSection *section)
3382 4855d41a Avi Kivity
{
3383 4855d41a Avi Kivity
}
3384 4855d41a Avi Kivity
3385 4855d41a Avi Kivity
static void io_log_sync(MemoryListener *listener,
3386 4855d41a Avi Kivity
                        MemoryRegionSection *section)
3387 4855d41a Avi Kivity
{
3388 4855d41a Avi Kivity
}
3389 4855d41a Avi Kivity
3390 4855d41a Avi Kivity
static void io_log_global_start(MemoryListener *listener)
3391 4855d41a Avi Kivity
{
3392 4855d41a Avi Kivity
}
3393 4855d41a Avi Kivity
3394 4855d41a Avi Kivity
static void io_log_global_stop(MemoryListener *listener)
3395 4855d41a Avi Kivity
{
3396 4855d41a Avi Kivity
}
3397 4855d41a Avi Kivity
3398 4855d41a Avi Kivity
static void io_eventfd_add(MemoryListener *listener,
3399 4855d41a Avi Kivity
                           MemoryRegionSection *section,
3400 4855d41a Avi Kivity
                           bool match_data, uint64_t data, int fd)
3401 4855d41a Avi Kivity
{
3402 4855d41a Avi Kivity
}
3403 4855d41a Avi Kivity
3404 4855d41a Avi Kivity
static void io_eventfd_del(MemoryListener *listener,
3405 4855d41a Avi Kivity
                           MemoryRegionSection *section,
3406 4855d41a Avi Kivity
                           bool match_data, uint64_t data, int fd)
3407 4855d41a Avi Kivity
{
3408 4855d41a Avi Kivity
}
3409 4855d41a Avi Kivity
3410 93632747 Avi Kivity
static MemoryListener core_memory_listener = {
3411 50c1e149 Avi Kivity
    .begin = core_begin,
3412 50c1e149 Avi Kivity
    .commit = core_commit,
3413 93632747 Avi Kivity
    .region_add = core_region_add,
3414 93632747 Avi Kivity
    .region_del = core_region_del,
3415 50c1e149 Avi Kivity
    .region_nop = core_region_nop,
3416 93632747 Avi Kivity
    .log_start = core_log_start,
3417 93632747 Avi Kivity
    .log_stop = core_log_stop,
3418 93632747 Avi Kivity
    .log_sync = core_log_sync,
3419 93632747 Avi Kivity
    .log_global_start = core_log_global_start,
3420 93632747 Avi Kivity
    .log_global_stop = core_log_global_stop,
3421 93632747 Avi Kivity
    .eventfd_add = core_eventfd_add,
3422 93632747 Avi Kivity
    .eventfd_del = core_eventfd_del,
3423 93632747 Avi Kivity
    .priority = 0,
3424 93632747 Avi Kivity
};
3425 93632747 Avi Kivity
3426 4855d41a Avi Kivity
static MemoryListener io_memory_listener = {
3427 50c1e149 Avi Kivity
    .begin = io_begin,
3428 50c1e149 Avi Kivity
    .commit = io_commit,
3429 4855d41a Avi Kivity
    .region_add = io_region_add,
3430 4855d41a Avi Kivity
    .region_del = io_region_del,
3431 50c1e149 Avi Kivity
    .region_nop = io_region_nop,
3432 4855d41a Avi Kivity
    .log_start = io_log_start,
3433 4855d41a Avi Kivity
    .log_stop = io_log_stop,
3434 4855d41a Avi Kivity
    .log_sync = io_log_sync,
3435 4855d41a Avi Kivity
    .log_global_start = io_log_global_start,
3436 4855d41a Avi Kivity
    .log_global_stop = io_log_global_stop,
3437 4855d41a Avi Kivity
    .eventfd_add = io_eventfd_add,
3438 4855d41a Avi Kivity
    .eventfd_del = io_eventfd_del,
3439 4855d41a Avi Kivity
    .priority = 0,
3440 4855d41a Avi Kivity
};
3441 4855d41a Avi Kivity
3442 62152b8a Avi Kivity
static void memory_map_init(void)
3443 62152b8a Avi Kivity
{
3444 7267c094 Anthony Liguori
    system_memory = g_malloc(sizeof(*system_memory));
3445 8417cebf Avi Kivity
    memory_region_init(system_memory, "system", INT64_MAX);
3446 62152b8a Avi Kivity
    set_system_memory_map(system_memory);
3447 309cb471 Avi Kivity
3448 7267c094 Anthony Liguori
    system_io = g_malloc(sizeof(*system_io));
3449 309cb471 Avi Kivity
    memory_region_init(system_io, "io", 65536);
3450 309cb471 Avi Kivity
    set_system_io_map(system_io);
3451 93632747 Avi Kivity
3452 4855d41a Avi Kivity
    memory_listener_register(&core_memory_listener, system_memory);
3453 4855d41a Avi Kivity
    memory_listener_register(&io_memory_listener, system_io);
3454 62152b8a Avi Kivity
}
3455 62152b8a Avi Kivity
3456 62152b8a Avi Kivity
MemoryRegion *get_system_memory(void)
3457 62152b8a Avi Kivity
{
3458 62152b8a Avi Kivity
    return system_memory;
3459 62152b8a Avi Kivity
}
3460 62152b8a Avi Kivity
3461 309cb471 Avi Kivity
MemoryRegion *get_system_io(void)
3462 309cb471 Avi Kivity
{
3463 309cb471 Avi Kivity
    return system_io;
3464 309cb471 Avi Kivity
}
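/* Illustrative sketch, not part of the original file: the usual way a
   board model consumes get_system_memory() -- create a RAM region and
   attach it at a guest-physical offset.  The name, size and offset are
   arbitrary example values. */
#if 0
static void example_map_ram(void)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", 8 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0x10000000, ram);
}
#endif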
3465 309cb471 Avi Kivity
3466 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3467 e2eef170 pbrook
3468 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3469 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3470 9349b4f9 Andreas Färber
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
3471 a68fe89c Paul Brook
                        uint8_t *buf, int len, int is_write)
3472 13eb76e0 bellard
{
3473 13eb76e0 bellard
    int l, flags;
3474 13eb76e0 bellard
    target_ulong page;
3475 53a5960a pbrook
    void *p;
3476 13eb76e0 bellard
3477 13eb76e0 bellard
    while (len > 0) {
3478 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3479 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3480 13eb76e0 bellard
        if (l > len)
3481 13eb76e0 bellard
            l = len;
3482 13eb76e0 bellard
        flags = page_get_flags(page);
3483 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
3484 a68fe89c Paul Brook
            return -1;
3485 13eb76e0 bellard
        if (is_write) {
3486 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3487 a68fe89c Paul Brook
                return -1;
3488 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3489 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3490 a68fe89c Paul Brook
                return -1;
3491 72fb7daa aurel32
            memcpy(p, buf, l);
3492 72fb7daa aurel32
            unlock_user(p, addr, l);
3493 13eb76e0 bellard
        } else {
3494 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3495 a68fe89c Paul Brook
                return -1;
3496 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3497 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3498 a68fe89c Paul Brook
                return -1;
3499 72fb7daa aurel32
            memcpy(buf, p, l);
3500 5b257578 aurel32
            unlock_user(p, addr, 0);
3501 13eb76e0 bellard
        }
3502 13eb76e0 bellard
        len -= l;
3503 13eb76e0 bellard
        buf += l;
3504 13eb76e0 bellard
        addr += l;
3505 13eb76e0 bellard
    }
3506 a68fe89c Paul Brook
    return 0;
3507 13eb76e0 bellard
}
3508 8df1cd07 bellard
3509 13eb76e0 bellard
#else
3510 c227f099 Anthony Liguori
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3511 13eb76e0 bellard
                            int len, int is_write)
3512 13eb76e0 bellard
{
3513 37ec01d4 Avi Kivity
    int l;
3514 13eb76e0 bellard
    uint8_t *ptr;
3515 13eb76e0 bellard
    uint32_t val;
3516 c227f099 Anthony Liguori
    target_phys_addr_t page;
3517 f3705d53 Avi Kivity
    MemoryRegionSection *section;
3518 3b46e624 ths
3519 13eb76e0 bellard
    while (len > 0) {
3520 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3521 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3522 13eb76e0 bellard
        if (l > len)
3523 13eb76e0 bellard
            l = len;
3524 06ef3525 Avi Kivity
        section = phys_page_find(page >> TARGET_PAGE_BITS);
3525 3b46e624 ths
3526 13eb76e0 bellard
        if (is_write) {
3527 f3705d53 Avi Kivity
            if (!memory_region_is_ram(section->mr)) {
3528 f1f6e3b8 Avi Kivity
                target_phys_addr_t addr1;
3529 cc5bea60 Blue Swirl
                addr1 = memory_region_section_addr(section, addr);
3530 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
3531 6a00d601 bellard
                   potential bugs */
3532 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3533 1c213d19 bellard
                    /* 32 bit write access */
3534 c27004ec bellard
                    val = ldl_p(buf);
3535 37ec01d4 Avi Kivity
                    io_mem_write(section->mr, addr1, val, 4);
3536 13eb76e0 bellard
                    l = 4;
3537 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3538 1c213d19 bellard
                    /* 16 bit write access */
3539 c27004ec bellard
                    val = lduw_p(buf);
3540 37ec01d4 Avi Kivity
                    io_mem_write(section->mr, addr1, val, 2);
3541 13eb76e0 bellard
                    l = 2;
3542 13eb76e0 bellard
                } else {
3543 1c213d19 bellard
                    /* 8 bit write access */
3544 c27004ec bellard
                    val = ldub_p(buf);
3545 37ec01d4 Avi Kivity
                    io_mem_write(section->mr, addr1, val, 1);
3546 13eb76e0 bellard
                    l = 1;
3547 13eb76e0 bellard
                }
3548 f3705d53 Avi Kivity
            } else if (!section->readonly) {
3549 8ca5692d Anthony PERARD
                ram_addr_t addr1;
3550 f3705d53 Avi Kivity
                addr1 = memory_region_get_ram_addr(section->mr)
3551 cc5bea60 Blue Swirl
                    + memory_region_section_addr(section, addr);
3552 13eb76e0 bellard
                /* RAM case */
3553 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
3554 13eb76e0 bellard
                memcpy(ptr, buf, l);
3555 3a7d929e bellard
                if (!cpu_physical_memory_is_dirty(addr1)) {
3556 3a7d929e bellard
                    /* invalidate code */
3557 3a7d929e bellard
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3558 3a7d929e bellard
                    /* set dirty bit */
3559 f7c11b53 Yoshiaki Tamura
                    cpu_physical_memory_set_dirty_flags(
3560 f7c11b53 Yoshiaki Tamura
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
3561 3a7d929e bellard
                }
3562 050a0ddf Anthony PERARD
                qemu_put_ram_ptr(ptr);
3563 13eb76e0 bellard
            }
3564 13eb76e0 bellard
        } else {
3565 cc5bea60 Blue Swirl
            if (!(memory_region_is_ram(section->mr) ||
3566 cc5bea60 Blue Swirl
                  memory_region_is_romd(section->mr))) {
3567 f1f6e3b8 Avi Kivity
                target_phys_addr_t addr1;
3568 13eb76e0 bellard
                /* I/O case */
3569 cc5bea60 Blue Swirl
                addr1 = memory_region_section_addr(section, addr);
3570 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3571 13eb76e0 bellard
                    /* 32 bit read access */
3572 37ec01d4 Avi Kivity
                    val = io_mem_read(section->mr, addr1, 4);
3573 c27004ec bellard
                    stl_p(buf, val);
3574 13eb76e0 bellard
                    l = 4;
3575 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3576 13eb76e0 bellard
                    /* 16 bit read access */
3577 37ec01d4 Avi Kivity
                    val = io_mem_read(section->mr, addr1, 2);
3578 c27004ec bellard
                    stw_p(buf, val);
3579 13eb76e0 bellard
                    l = 2;
3580 13eb76e0 bellard
                } else {
3581 1c213d19 bellard
                    /* 8 bit read access */
3582 37ec01d4 Avi Kivity
                    val = io_mem_read(section->mr, addr1, 1);
3583 c27004ec bellard
                    stb_p(buf, val);
3584 13eb76e0 bellard
                    l = 1;
3585 13eb76e0 bellard
                }
3586 13eb76e0 bellard
            } else {
3587 13eb76e0 bellard
                /* RAM case */
3588 0a1b357f Anthony PERARD
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
3589 cc5bea60 Blue Swirl
                                       + memory_region_section_addr(section,
3590 cc5bea60 Blue Swirl
                                                                    addr));
3591 f3705d53 Avi Kivity
                memcpy(buf, ptr, l);
3592 050a0ddf Anthony PERARD
                qemu_put_ram_ptr(ptr);
3593 13eb76e0 bellard
            }
3594 13eb76e0 bellard
        }
3595 13eb76e0 bellard
        len -= l;
3596 13eb76e0 bellard
        buf += l;
3597 13eb76e0 bellard
        addr += l;
3598 13eb76e0 bellard
    }
3599 13eb76e0 bellard
}
3600 8df1cd07 bellard
3601 d0ecd2aa bellard
/* used for ROM loading: can write in RAM and ROM */
3602 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3603 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3604 d0ecd2aa bellard
{
3605 d0ecd2aa bellard
    int l;
3606 d0ecd2aa bellard
    uint8_t *ptr;
3607 c227f099 Anthony Liguori
    target_phys_addr_t page;
3608 f3705d53 Avi Kivity
    MemoryRegionSection *section;
3609 3b46e624 ths
3610 d0ecd2aa bellard
    while (len > 0) {
3611 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3612 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3613 d0ecd2aa bellard
        if (l > len)
3614 d0ecd2aa bellard
            l = len;
3615 06ef3525 Avi Kivity
        section = phys_page_find(page >> TARGET_PAGE_BITS);
3616 3b46e624 ths
3617 cc5bea60 Blue Swirl
        if (!(memory_region_is_ram(section->mr) ||
3618 cc5bea60 Blue Swirl
              memory_region_is_romd(section->mr))) {
3619 d0ecd2aa bellard
            /* do nothing */
3620 d0ecd2aa bellard
        } else {
3621 d0ecd2aa bellard
            unsigned long addr1;
3622 f3705d53 Avi Kivity
            addr1 = memory_region_get_ram_addr(section->mr)
3623 cc5bea60 Blue Swirl
                + memory_region_section_addr(section, addr);
3624 d0ecd2aa bellard
            /* ROM/RAM case */
3625 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3626 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3627 050a0ddf Anthony PERARD
            qemu_put_ram_ptr(ptr);
3628 d0ecd2aa bellard
        }
3629 d0ecd2aa bellard
        len -= l;
3630 d0ecd2aa bellard
        buf += l;
3631 d0ecd2aa bellard
        addr += l;
3632 d0ecd2aa bellard
    }
3633 d0ecd2aa bellard
}
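/* Illustrative sketch, not part of the original file: a firmware loader
   is the typical caller of cpu_physical_memory_write_rom(), since a
   plain cpu_physical_memory_rw() write to a read-only section is
   silently dropped (see the !section->readonly test above).  The flash
   base address is a hypothetical example value. */
#if 0
static void example_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}
#endif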
3634 d0ecd2aa bellard
3635 6d16c2f8 aliguori
typedef struct {
3636 6d16c2f8 aliguori
    void *buffer;
3637 c227f099 Anthony Liguori
    target_phys_addr_t addr;
3638 c227f099 Anthony Liguori
    target_phys_addr_t len;
3639 6d16c2f8 aliguori
} BounceBuffer;
3640 6d16c2f8 aliguori
3641 6d16c2f8 aliguori
static BounceBuffer bounce;
3642 6d16c2f8 aliguori
3643 ba223c29 aliguori
typedef struct MapClient {
3644 ba223c29 aliguori
    void *opaque;
3645 ba223c29 aliguori
    void (*callback)(void *opaque);
3646 72cf2d4f Blue Swirl
    QLIST_ENTRY(MapClient) link;
3647 ba223c29 aliguori
} MapClient;
3648 ba223c29 aliguori
3649 72cf2d4f Blue Swirl
static QLIST_HEAD(map_client_list, MapClient) map_client_list
3650 72cf2d4f Blue Swirl
    = QLIST_HEAD_INITIALIZER(map_client_list);
3651 ba223c29 aliguori
3652 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3653 ba223c29 aliguori
{
3654 7267c094 Anthony Liguori
    MapClient *client = g_malloc(sizeof(*client));
3655 ba223c29 aliguori
3656 ba223c29 aliguori
    client->opaque = opaque;
3657 ba223c29 aliguori
    client->callback = callback;
3658 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3659 ba223c29 aliguori
    return client;
3660 ba223c29 aliguori
}
3661 ba223c29 aliguori
3662 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3663 ba223c29 aliguori
{
3664 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3665 ba223c29 aliguori
3666 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3667 7267c094 Anthony Liguori
    g_free(client);
3668 ba223c29 aliguori
}
3669 ba223c29 aliguori
3670 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3671 ba223c29 aliguori
{
3672 ba223c29 aliguori
    MapClient *client;
3673 ba223c29 aliguori
3674 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3675 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3676 ba223c29 aliguori
        client->callback(client->opaque);
3677 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3678 ba223c29 aliguori
    }
3679 ba223c29 aliguori
}
3680 ba223c29 aliguori
3681 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3682 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3683 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3684 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3685 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
3686 ba223c29 aliguori
 * likely to succeed.
3687 6d16c2f8 aliguori
 */
3688 c227f099 Anthony Liguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
3689 c227f099 Anthony Liguori
                              target_phys_addr_t *plen,
3690 6d16c2f8 aliguori
                              int is_write)
3691 6d16c2f8 aliguori
{
3692 c227f099 Anthony Liguori
    target_phys_addr_t len = *plen;
3693 38bee5dc Stefano Stabellini
    target_phys_addr_t todo = 0;
3694 6d16c2f8 aliguori
    int l;
3695 c227f099 Anthony Liguori
    target_phys_addr_t page;
3696 f3705d53 Avi Kivity
    MemoryRegionSection *section;
3697 f15fbc4b Anthony PERARD
    ram_addr_t raddr = RAM_ADDR_MAX;
3698 8ab934f9 Stefano Stabellini
    ram_addr_t rlen;
3699 8ab934f9 Stefano Stabellini
    void *ret;
3700 6d16c2f8 aliguori
3701 6d16c2f8 aliguori
    while (len > 0) {
3702 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
3703 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
3704 6d16c2f8 aliguori
        if (l > len)
3705 6d16c2f8 aliguori
            l = len;
3706 06ef3525 Avi Kivity
        section = phys_page_find(page >> TARGET_PAGE_BITS);
3707 6d16c2f8 aliguori
3708 f3705d53 Avi Kivity
        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
3709 38bee5dc Stefano Stabellini
            if (todo || bounce.buffer) {
3710 6d16c2f8 aliguori
                break;
3711 6d16c2f8 aliguori
            }
3712 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3713 6d16c2f8 aliguori
            bounce.addr = addr;
3714 6d16c2f8 aliguori
            bounce.len = l;
3715 6d16c2f8 aliguori
            if (!is_write) {
3716 54f7b4a3 Stefan Weil
                cpu_physical_memory_read(addr, bounce.buffer, l);
3717 6d16c2f8 aliguori
            }
3718 38bee5dc Stefano Stabellini
3719 38bee5dc Stefano Stabellini
            *plen = l;
3720 38bee5dc Stefano Stabellini
            return bounce.buffer;
3721 6d16c2f8 aliguori
        }
3722 8ab934f9 Stefano Stabellini
        if (!todo) {
3723 f3705d53 Avi Kivity
            raddr = memory_region_get_ram_addr(section->mr)
3724 cc5bea60 Blue Swirl
                + memory_region_section_addr(section, addr);
3725 8ab934f9 Stefano Stabellini
        }
3726 6d16c2f8 aliguori
3727 6d16c2f8 aliguori
        len -= l;
3728 6d16c2f8 aliguori
        addr += l;
3729 38bee5dc Stefano Stabellini
        todo += l;
3730 6d16c2f8 aliguori
    }
3731 8ab934f9 Stefano Stabellini
    rlen = todo;
3732 8ab934f9 Stefano Stabellini
    ret = qemu_ram_ptr_length(raddr, &rlen);
3733 8ab934f9 Stefano Stabellini
    *plen = rlen;
3734 8ab934f9 Stefano Stabellini
    return ret;
3735 6d16c2f8 aliguori
}
3736 6d16c2f8 aliguori
3737 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3738 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3739 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
3740 6d16c2f8 aliguori
 */
3741 c227f099 Anthony Liguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3742 c227f099 Anthony Liguori
                               int is_write, target_phys_addr_t access_len)
3743 6d16c2f8 aliguori
{
3744 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
3745 6d16c2f8 aliguori
        if (is_write) {
3746 e890261f Marcelo Tosatti
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3747 6d16c2f8 aliguori
            while (access_len) {
3748 6d16c2f8 aliguori
                unsigned l;
3749 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
3750 6d16c2f8 aliguori
                if (l > access_len)
3751 6d16c2f8 aliguori
                    l = access_len;
3752 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
3753 6d16c2f8 aliguori
                    /* invalidate code */
3754 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3755 6d16c2f8 aliguori
                    /* set dirty bit */
3756 f7c11b53 Yoshiaki Tamura
                    cpu_physical_memory_set_dirty_flags(
3757 f7c11b53 Yoshiaki Tamura
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
3758 6d16c2f8 aliguori
                }
3759 6d16c2f8 aliguori
                addr1 += l;
3760 6d16c2f8 aliguori
                access_len -= l;
3761 6d16c2f8 aliguori
            }
3762 6d16c2f8 aliguori
        }
3763 868bb33f Jan Kiszka
        if (xen_enabled()) {
3764 e41d7c69 Jan Kiszka
            xen_invalidate_map_cache_entry(buffer);
3765 050a0ddf Anthony PERARD
        }
3766 6d16c2f8 aliguori
        return;
3767 6d16c2f8 aliguori
    }
3768 6d16c2f8 aliguori
    if (is_write) {
3769 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3770 6d16c2f8 aliguori
    }
3771 f8a83245 Herve Poussineau
    qemu_vfree(bounce.buffer);
3772 6d16c2f8 aliguori
    bounce.buffer = NULL;
3773 ba223c29 aliguori
    cpu_notify_map_clients();
3774 6d16c2f8 aliguori
}
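/* Illustrative sketch, not part of the original file: the calling
   convention for the map/unmap pair above.  A device model maps the
   region, honours the possibly shortened *plen, and would fall back to
   cpu_register_map_client() when NULL is returned (bounce buffer
   busy). */
#if 0
static void example_dma_write(target_phys_addr_t addr,
                              const uint8_t *data, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* resources exhausted: register a map client and retry from
           its callback */
        return;
    }
    memcpy(host, data, plen);     /* plen may be smaller than len */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif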
3775 d0ecd2aa bellard
3776 8df1cd07 bellard
/* warning: addr must be aligned */
3777 1e78bcc1 Alexander Graf
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3778 1e78bcc1 Alexander Graf
                                         enum device_endian endian)
3779 8df1cd07 bellard
{
3780 8df1cd07 bellard
    uint8_t *ptr;
3781 8df1cd07 bellard
    uint32_t val;
3782 f3705d53 Avi Kivity
    MemoryRegionSection *section;
3783 8df1cd07 bellard
3784 06ef3525 Avi Kivity
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
3785 3b46e624 ths
3786 cc5bea60 Blue Swirl
    if (!(memory_region_is_ram(section->mr) ||
3787 cc5bea60 Blue Swirl
          memory_region_is_romd(section->mr))) {
3788 8df1cd07 bellard
        /* I/O case */
3789 cc5bea60 Blue Swirl
        addr = memory_region_section_addr(section, addr);
3790 37ec01d4 Avi Kivity
        val = io_mem_read(section->mr, addr, 4);
3791 1e78bcc1 Alexander Graf
#if defined(TARGET_WORDS_BIGENDIAN)
3792 1e78bcc1 Alexander Graf
        if (endian == DEVICE_LITTLE_ENDIAN) {
3793 1e78bcc1 Alexander Graf
            val = bswap32(val);
3794 1e78bcc1 Alexander Graf
        }
3795 1e78bcc1 Alexander Graf
#else
3796 1e78bcc1 Alexander Graf
        if (endian == DEVICE_BIG_ENDIAN) {
3797 1e78bcc1 Alexander Graf
            val = bswap32(val);
3798 1e78bcc1 Alexander Graf
        }
3799 1e78bcc1 Alexander Graf
#endif
3800 8df1cd07 bellard
    } else {
3801 8df1cd07 bellard
        /* RAM case */
3802 f3705d53 Avi Kivity
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3803 06ef3525 Avi Kivity
                                & TARGET_PAGE_MASK)
3804 cc5bea60 Blue Swirl
                               + memory_region_section_addr(section, addr));
3805 1e78bcc1 Alexander Graf
        switch (endian) {
3806 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
3807 1e78bcc1 Alexander Graf
            val = ldl_le_p(ptr);
3808 1e78bcc1 Alexander Graf
            break;
3809 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
3810 1e78bcc1 Alexander Graf
            val = ldl_be_p(ptr);
3811 1e78bcc1 Alexander Graf
            break;
3812 1e78bcc1 Alexander Graf
        default:
3813 1e78bcc1 Alexander Graf
            val = ldl_p(ptr);
3814 1e78bcc1 Alexander Graf
            break;
3815 1e78bcc1 Alexander Graf
        }
3816 8df1cd07 bellard
    }
3817 8df1cd07 bellard
    return val;
3818 8df1cd07 bellard
}
3819 8df1cd07 bellard
3820 1e78bcc1 Alexander Graf
uint32_t ldl_phys(target_phys_addr_t addr)
3821 1e78bcc1 Alexander Graf
{
3822 1e78bcc1 Alexander Graf
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3823 1e78bcc1 Alexander Graf
}
3824 1e78bcc1 Alexander Graf
3825 1e78bcc1 Alexander Graf
uint32_t ldl_le_phys(target_phys_addr_t addr)
3826 1e78bcc1 Alexander Graf
{
3827 1e78bcc1 Alexander Graf
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3828 1e78bcc1 Alexander Graf
}
3829 1e78bcc1 Alexander Graf
3830 1e78bcc1 Alexander Graf
uint32_t ldl_be_phys(target_phys_addr_t addr)
3831 1e78bcc1 Alexander Graf
{
3832 1e78bcc1 Alexander Graf
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3833 1e78bcc1 Alexander Graf
}
3834 1e78bcc1 Alexander Graf
3835 84b7b8e7 bellard
/* warning: addr must be aligned */
3836 1e78bcc1 Alexander Graf
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3837 1e78bcc1 Alexander Graf
                                         enum device_endian endian)
3838 84b7b8e7 bellard
{
3839 84b7b8e7 bellard
    uint8_t *ptr;
3840 84b7b8e7 bellard
    uint64_t val;
3841 f3705d53 Avi Kivity
    MemoryRegionSection *section;
3842 84b7b8e7 bellard
3843 06ef3525 Avi Kivity
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
3844 3b46e624 ths
3845 cc5bea60 Blue Swirl
    if (!(memory_region_is_ram(section->mr) ||
3846 cc5bea60 Blue Swirl
          memory_region_is_romd(section->mr))) {
3847 84b7b8e7 bellard
        /* I/O case */
3848 cc5bea60 Blue Swirl
        addr = memory_region_section_addr(section, addr);
3849 1e78bcc1 Alexander Graf
3850 1e78bcc1 Alexander Graf
        /* XXX This is broken when device endian != cpu endian.
3851 1e78bcc1 Alexander Graf
               Fix and add "endian" variable check */
3852 84b7b8e7 bellard
#ifdef TARGET_WORDS_BIGENDIAN
3853 37ec01d4 Avi Kivity
        val = io_mem_read(section->mr, addr, 4) << 32;
3854 37ec01d4 Avi Kivity
        val |= io_mem_read(section->mr, addr + 4, 4);
3855 84b7b8e7 bellard
#else
3856 37ec01d4 Avi Kivity
        val = io_mem_read(section->mr, addr, 4);
3857 37ec01d4 Avi Kivity
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
3858 84b7b8e7 bellard
#endif
3859 84b7b8e7 bellard
    } else {
3860 84b7b8e7 bellard
        /* RAM case */
3861 f3705d53 Avi Kivity
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3862 06ef3525 Avi Kivity
                                & TARGET_PAGE_MASK)
3863 cc5bea60 Blue Swirl
                               + memory_region_section_addr(section, addr));
3864 1e78bcc1 Alexander Graf
        switch (endian) {
3865 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
3866 1e78bcc1 Alexander Graf
            val = ldq_le_p(ptr);
3867 1e78bcc1 Alexander Graf
            break;
3868 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
3869 1e78bcc1 Alexander Graf
            val = ldq_be_p(ptr);
3870 1e78bcc1 Alexander Graf
            break;
3871 1e78bcc1 Alexander Graf
        default:
3872 1e78bcc1 Alexander Graf
            val = ldq_p(ptr);
3873 1e78bcc1 Alexander Graf
            break;
3874 1e78bcc1 Alexander Graf
        }
3875 84b7b8e7 bellard
    }
3876 84b7b8e7 bellard
    return val;
3877 84b7b8e7 bellard
}
3878 84b7b8e7 bellard
3879 1e78bcc1 Alexander Graf
uint64_t ldq_phys(target_phys_addr_t addr)
3880 1e78bcc1 Alexander Graf
{
3881 1e78bcc1 Alexander Graf
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3882 1e78bcc1 Alexander Graf
}
3883 1e78bcc1 Alexander Graf
3884 1e78bcc1 Alexander Graf
uint64_t ldq_le_phys(target_phys_addr_t addr)
3885 1e78bcc1 Alexander Graf
{
3886 1e78bcc1 Alexander Graf
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3887 1e78bcc1 Alexander Graf
}
3888 1e78bcc1 Alexander Graf
3889 1e78bcc1 Alexander Graf
uint64_t ldq_be_phys(target_phys_addr_t addr)
3890 1e78bcc1 Alexander Graf
{
3891 1e78bcc1 Alexander Graf
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3892 1e78bcc1 Alexander Graf
}
3893 1e78bcc1 Alexander Graf
3894 aab33094 bellard
/* XXX: optimize */
3895 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
3896 aab33094 bellard
{
3897 aab33094 bellard
    uint8_t val;
3898 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3899 aab33094 bellard
    return val;
3900 aab33094 bellard
}
3901 aab33094 bellard
3902 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
3903 1e78bcc1 Alexander Graf
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3904 1e78bcc1 Alexander Graf
                                          enum device_endian endian)
3905 aab33094 bellard
{
3906 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
3907 733f0b02 Michael S. Tsirkin
    uint64_t val;
3908 f3705d53 Avi Kivity
    MemoryRegionSection *section;
3909 733f0b02 Michael S. Tsirkin
3910 06ef3525 Avi Kivity
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
3911 733f0b02 Michael S. Tsirkin
3912 cc5bea60 Blue Swirl
    if (!(memory_region_is_ram(section->mr) ||
3913 cc5bea60 Blue Swirl
          memory_region_is_romd(section->mr))) {
3914 733f0b02 Michael S. Tsirkin
        /* I/O case */
3915 cc5bea60 Blue Swirl
        addr = memory_region_section_addr(section, addr);
3916 37ec01d4 Avi Kivity
        val = io_mem_read(section->mr, addr, 2);
3917 1e78bcc1 Alexander Graf
#if defined(TARGET_WORDS_BIGENDIAN)
3918 1e78bcc1 Alexander Graf
        if (endian == DEVICE_LITTLE_ENDIAN) {
3919 1e78bcc1 Alexander Graf
            val = bswap16(val);
3920 1e78bcc1 Alexander Graf
        }
3921 1e78bcc1 Alexander Graf
#else
3922 1e78bcc1 Alexander Graf
        if (endian == DEVICE_BIG_ENDIAN) {
3923 1e78bcc1 Alexander Graf
            val = bswap16(val);
3924 1e78bcc1 Alexander Graf
        }
3925 1e78bcc1 Alexander Graf
#endif
3926 733f0b02 Michael S. Tsirkin
    } else {
3927 733f0b02 Michael S. Tsirkin
        /* RAM case */
3928 f3705d53 Avi Kivity
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3929 06ef3525 Avi Kivity
                                & TARGET_PAGE_MASK)
3930 cc5bea60 Blue Swirl
                               + memory_region_section_addr(section, addr));
3931 1e78bcc1 Alexander Graf
        switch (endian) {
3932 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
3933 1e78bcc1 Alexander Graf
            val = lduw_le_p(ptr);
3934 1e78bcc1 Alexander Graf
            break;
3935 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
3936 1e78bcc1 Alexander Graf
            val = lduw_be_p(ptr);
3937 1e78bcc1 Alexander Graf
            break;
3938 1e78bcc1 Alexander Graf
        default:
3939 1e78bcc1 Alexander Graf
            val = lduw_p(ptr);
3940 1e78bcc1 Alexander Graf
            break;
3941 1e78bcc1 Alexander Graf
        }
3942 733f0b02 Michael S. Tsirkin
    }
3943 733f0b02 Michael S. Tsirkin
    return val;
3944 aab33094 bellard
}
3945 aab33094 bellard
3946 1e78bcc1 Alexander Graf
uint32_t lduw_phys(target_phys_addr_t addr)
3947 1e78bcc1 Alexander Graf
{
3948 1e78bcc1 Alexander Graf
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3949 1e78bcc1 Alexander Graf
}
3950 1e78bcc1 Alexander Graf
3951 1e78bcc1 Alexander Graf
uint32_t lduw_le_phys(target_phys_addr_t addr)
3952 1e78bcc1 Alexander Graf
{
3953 1e78bcc1 Alexander Graf
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3954 1e78bcc1 Alexander Graf
}
3955 1e78bcc1 Alexander Graf
3956 1e78bcc1 Alexander Graf
uint32_t lduw_be_phys(target_phys_addr_t addr)
3957 1e78bcc1 Alexander Graf
{
3958 1e78bcc1 Alexander Graf
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3959 1e78bcc1 Alexander Graf
}
3960 1e78bcc1 Alexander Graf
3961 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not marked as dirty
3962 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
3963 8df1cd07 bellard
   bits are used to track modified PTEs */
3964 c227f099 Anthony Liguori
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3965 8df1cd07 bellard
{
3966 8df1cd07 bellard
    uint8_t *ptr;
3967 f3705d53 Avi Kivity
    MemoryRegionSection *section;
3968 8df1cd07 bellard
3969 06ef3525 Avi Kivity
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
3970 3b46e624 ths
3971 f3705d53 Avi Kivity
    if (!memory_region_is_ram(section->mr) || section->readonly) {
3972 cc5bea60 Blue Swirl
        addr = memory_region_section_addr(section, addr);
3973 f3705d53 Avi Kivity
        if (memory_region_is_ram(section->mr)) {
3974 37ec01d4 Avi Kivity
            section = &phys_sections[phys_section_rom];
3975 06ef3525 Avi Kivity
        }
3976 37ec01d4 Avi Kivity
        io_mem_write(section->mr, addr, val, 4);
3977 8df1cd07 bellard
    } else {
3978 f3705d53 Avi Kivity
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
3979 06ef3525 Avi Kivity
                               & TARGET_PAGE_MASK)
3980 cc5bea60 Blue Swirl
            + memory_region_section_addr(section, addr);
3981 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3982 8df1cd07 bellard
        stl_p(ptr, val);
3983 74576198 aliguori
3984 74576198 aliguori
        if (unlikely(in_migration)) {
3985 74576198 aliguori
            if (!cpu_physical_memory_is_dirty(addr1)) {
3986 74576198 aliguori
                /* invalidate code */
3987 74576198 aliguori
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3988 74576198 aliguori
                /* set dirty bit */
3989 f7c11b53 Yoshiaki Tamura
                cpu_physical_memory_set_dirty_flags(
3990 f7c11b53 Yoshiaki Tamura
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
3991 74576198 aliguori
            }
3992 74576198 aliguori
        }
3993 8df1cd07 bellard
    }
3994 8df1cd07 bellard
}
3995 8df1cd07 bellard
3996 c227f099 Anthony Liguori
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3997 bc98a7ef j_mayer
{
3998 bc98a7ef j_mayer
    uint8_t *ptr;
3999 f3705d53 Avi Kivity
    MemoryRegionSection *section;
4000 bc98a7ef j_mayer
4001 06ef3525 Avi Kivity
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
4002 3b46e624 ths
4003 f3705d53 Avi Kivity
    if (!memory_region_is_ram(section->mr) || section->readonly) {
4004 cc5bea60 Blue Swirl
        addr = memory_region_section_addr(section, addr);
4005 f3705d53 Avi Kivity
        if (memory_region_is_ram(section->mr)) {
4006 37ec01d4 Avi Kivity
            section = &phys_sections[phys_section_rom];
4007 06ef3525 Avi Kivity
        }
4008 bc98a7ef j_mayer
#ifdef TARGET_WORDS_BIGENDIAN
4009 37ec01d4 Avi Kivity
        io_mem_write(section->mr, addr, val >> 32, 4);
4010 37ec01d4 Avi Kivity
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
4011 bc98a7ef j_mayer
#else
4012 37ec01d4 Avi Kivity
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
4013 37ec01d4 Avi Kivity
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
4014 bc98a7ef j_mayer
#endif
4015 bc98a7ef j_mayer
    } else {
4016 f3705d53 Avi Kivity
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
4017 06ef3525 Avi Kivity
                                & TARGET_PAGE_MASK)
4018 cc5bea60 Blue Swirl
                               + memory_region_section_addr(section, addr));
4019 bc98a7ef j_mayer
        stq_p(ptr, val);
4020 bc98a7ef j_mayer
    }
4021 bc98a7ef j_mayer
}
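/* Illustrative sketch, not part of the original file: the notdirty
   stores above suit target-MMU helpers that rewrite a PTE in place,
   e.g. to set an accessed/dirty bit, without invalidating translated
   code on that page.  The flag position below is a hypothetical
   example. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    const uint32_t accessed_bit = 1u << 5;  /* hypothetical flag bit */
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | accessed_bit);
}
#endif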
4022 bc98a7ef j_mayer
4023 8df1cd07 bellard
/* warning: addr must be aligned */
4024 1e78bcc1 Alexander Graf
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4025 1e78bcc1 Alexander Graf
                                     enum device_endian endian)
4026 8df1cd07 bellard
{
4027 8df1cd07 bellard
    uint8_t *ptr;
4028 f3705d53 Avi Kivity
    MemoryRegionSection *section;
4029 8df1cd07 bellard
4030 06ef3525 Avi Kivity
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
4031 3b46e624 ths
4032 f3705d53 Avi Kivity
    if (!memory_region_is_ram(section->mr) || section->readonly) {
4033 cc5bea60 Blue Swirl
        addr = memory_region_section_addr(section, addr);
4034 f3705d53 Avi Kivity
        if (memory_region_is_ram(section->mr)) {
4035 37ec01d4 Avi Kivity
            section = &phys_sections[phys_section_rom];
4036 06ef3525 Avi Kivity
        }
4037 1e78bcc1 Alexander Graf
#if defined(TARGET_WORDS_BIGENDIAN)
4038 1e78bcc1 Alexander Graf
        if (endian == DEVICE_LITTLE_ENDIAN) {
4039 1e78bcc1 Alexander Graf
            val = bswap32(val);
4040 1e78bcc1 Alexander Graf
        }
4041 1e78bcc1 Alexander Graf
#else
4042 1e78bcc1 Alexander Graf
        if (endian == DEVICE_BIG_ENDIAN) {
4043 1e78bcc1 Alexander Graf
            val = bswap32(val);
4044 1e78bcc1 Alexander Graf
        }
4045 1e78bcc1 Alexander Graf
#endif
4046 37ec01d4 Avi Kivity
        io_mem_write(section->mr, addr, val, 4);
4047 8df1cd07 bellard
    } else {
4048 8df1cd07 bellard
        unsigned long addr1;
4049 f3705d53 Avi Kivity
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4050 cc5bea60 Blue Swirl
            + memory_region_section_addr(section, addr);
4051 8df1cd07 bellard
        /* RAM case */
4052 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
4053 1e78bcc1 Alexander Graf
        switch (endian) {
4054 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
4055 1e78bcc1 Alexander Graf
            stl_le_p(ptr, val);
4056 1e78bcc1 Alexander Graf
            break;
4057 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
4058 1e78bcc1 Alexander Graf
            stl_be_p(ptr, val);
4059 1e78bcc1 Alexander Graf
            break;
4060 1e78bcc1 Alexander Graf
        default:
4061 1e78bcc1 Alexander Graf
            stl_p(ptr, val);
4062 1e78bcc1 Alexander Graf
            break;
4063 1e78bcc1 Alexander Graf
        }
4064 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
4065 3a7d929e bellard
            /* invalidate code */
4066 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4067 3a7d929e bellard
            /* set dirty bit */
4068 f7c11b53 Yoshiaki Tamura
            cpu_physical_memory_set_dirty_flags(addr1,
4069 f7c11b53 Yoshiaki Tamura
                (0xff & ~CODE_DIRTY_FLAG));
4070 3a7d929e bellard
        }
4071 8df1cd07 bellard
    }
4072 8df1cd07 bellard
}
4073 8df1cd07 bellard
4074 1e78bcc1 Alexander Graf
void stl_phys(target_phys_addr_t addr, uint32_t val)
4075 1e78bcc1 Alexander Graf
{
4076 1e78bcc1 Alexander Graf
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4077 1e78bcc1 Alexander Graf
}
4078 1e78bcc1 Alexander Graf
4079 1e78bcc1 Alexander Graf
void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4080 1e78bcc1 Alexander Graf
{
4081 1e78bcc1 Alexander Graf
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4082 1e78bcc1 Alexander Graf
}
4083 1e78bcc1 Alexander Graf
4084 1e78bcc1 Alexander Graf
void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4085 1e78bcc1 Alexander Graf
{
4086 1e78bcc1 Alexander Graf
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4087 1e78bcc1 Alexander Graf
}
4088 1e78bcc1 Alexander Graf
4089 aab33094 bellard
/* XXX: optimize */
4090 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
4091 aab33094 bellard
{
4092 aab33094 bellard
    uint8_t v = val;
4093 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
4094 aab33094 bellard
}
4095 aab33094 bellard
4096 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
4097 1e78bcc1 Alexander Graf
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4098 1e78bcc1 Alexander Graf
                                     enum device_endian endian)
4099 aab33094 bellard
{
4100 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
4101 f3705d53 Avi Kivity
    MemoryRegionSection *section;
4102 733f0b02 Michael S. Tsirkin
4103 06ef3525 Avi Kivity
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
4104 733f0b02 Michael S. Tsirkin
4105 f3705d53 Avi Kivity
    if (!memory_region_is_ram(section->mr) || section->readonly) {
4106 cc5bea60 Blue Swirl
        addr = memory_region_section_addr(section, addr);
4107 f3705d53 Avi Kivity
        if (memory_region_is_ram(section->mr)) {
4108 37ec01d4 Avi Kivity
            section = &phys_sections[phys_section_rom];
4109 06ef3525 Avi Kivity
        }
4110 1e78bcc1 Alexander Graf
#if defined(TARGET_WORDS_BIGENDIAN)
4111 1e78bcc1 Alexander Graf
        if (endian == DEVICE_LITTLE_ENDIAN) {
4112 1e78bcc1 Alexander Graf
            val = bswap16(val);
4113 1e78bcc1 Alexander Graf
        }
4114 1e78bcc1 Alexander Graf
#else
4115 1e78bcc1 Alexander Graf
        if (endian == DEVICE_BIG_ENDIAN) {
4116 1e78bcc1 Alexander Graf
            val = bswap16(val);
4117 1e78bcc1 Alexander Graf
        }
4118 1e78bcc1 Alexander Graf
#endif
4119 37ec01d4 Avi Kivity
        io_mem_write(section->mr, addr, val, 2);
4120 733f0b02 Michael S. Tsirkin
    } else {
4121 733f0b02 Michael S. Tsirkin
        unsigned long addr1;
4122 f3705d53 Avi Kivity
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4123 cc5bea60 Blue Swirl
            + memory_region_section_addr(section, addr);
4124 733f0b02 Michael S. Tsirkin
        /* RAM case */
4125 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(addr1);
4126 1e78bcc1 Alexander Graf
        switch (endian) {
4127 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
4128 1e78bcc1 Alexander Graf
            stw_le_p(ptr, val);
4129 1e78bcc1 Alexander Graf
            break;
4130 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
4131 1e78bcc1 Alexander Graf
            stw_be_p(ptr, val);
4132 1e78bcc1 Alexander Graf
            break;
4133 1e78bcc1 Alexander Graf
        default:
4134 1e78bcc1 Alexander Graf
            stw_p(ptr, val);
4135 1e78bcc1 Alexander Graf
            break;
4136 1e78bcc1 Alexander Graf
        }
4137 733f0b02 Michael S. Tsirkin
        if (!cpu_physical_memory_is_dirty(addr1)) {
4138 733f0b02 Michael S. Tsirkin
            /* invalidate code */
4139 733f0b02 Michael S. Tsirkin
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4140 733f0b02 Michael S. Tsirkin
            /* set dirty bit */
4141 733f0b02 Michael S. Tsirkin
            cpu_physical_memory_set_dirty_flags(addr1,
4142 733f0b02 Michael S. Tsirkin
                (0xff & ~CODE_DIRTY_FLAG));
4143 733f0b02 Michael S. Tsirkin
        }
4144 733f0b02 Michael S. Tsirkin
    }
4145 aab33094 bellard
}
4146 aab33094 bellard
4147 1e78bcc1 Alexander Graf
void stw_phys(target_phys_addr_t addr, uint32_t val)
4148 1e78bcc1 Alexander Graf
{
4149 1e78bcc1 Alexander Graf
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4150 1e78bcc1 Alexander Graf
}
4151 1e78bcc1 Alexander Graf
4152 1e78bcc1 Alexander Graf
void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4153 1e78bcc1 Alexander Graf
{
4154 1e78bcc1 Alexander Graf
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4155 1e78bcc1 Alexander Graf
}
4156 1e78bcc1 Alexander Graf
4157 1e78bcc1 Alexander Graf
void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4158 1e78bcc1 Alexander Graf
{
4159 1e78bcc1 Alexander Graf
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4160 1e78bcc1 Alexander Graf
}
4161 1e78bcc1 Alexander Graf
4162 aab33094 bellard
/* XXX: optimize */
4163 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
4164 aab33094 bellard
{
4165 aab33094 bellard
    val = tswap64(val);
4166 71d2b725 Stefan Weil
    cpu_physical_memory_write(addr, &val, 8);
4167 aab33094 bellard
}
4168 aab33094 bellard
4169 1e78bcc1 Alexander Graf
void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4170 1e78bcc1 Alexander Graf
{
4171 1e78bcc1 Alexander Graf
    val = cpu_to_le64(val);
4172 1e78bcc1 Alexander Graf
    cpu_physical_memory_write(addr, &val, 8);
4173 1e78bcc1 Alexander Graf
}
4174 1e78bcc1 Alexander Graf
4175 1e78bcc1 Alexander Graf
void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4176 1e78bcc1 Alexander Graf
{
4177 1e78bcc1 Alexander Graf
    val = cpu_to_be64(val);
4178 1e78bcc1 Alexander Graf
    cpu_physical_memory_write(addr, &val, 8);
4179 1e78bcc1 Alexander Graf
}
4180 1e78bcc1 Alexander Graf
4181 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
4182 9349b4f9 Andreas Färber
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
4183 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
4184 13eb76e0 bellard
{
4185 13eb76e0 bellard
    int l;
4186 c227f099 Anthony Liguori
    target_phys_addr_t phys_addr;
4187 9b3c35e0 j_mayer
    target_ulong page;
4188 13eb76e0 bellard
4189 13eb76e0 bellard
    while (len > 0) {
4190 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
4191 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
4192 13eb76e0 bellard
        /* if no physical page mapped, return an error */
4193 13eb76e0 bellard
        if (phys_addr == -1)
4194 13eb76e0 bellard
            return -1;
4195 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
4196 13eb76e0 bellard
        if (l > len)
4197 13eb76e0 bellard
            l = len;
4198 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
4199 5e2972fd aliguori
        if (is_write)
4200 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
4201 5e2972fd aliguori
        else
4202 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4203 13eb76e0 bellard
        len -= l;
4204 13eb76e0 bellard
        buf += l;
4205 13eb76e0 bellard
        addr += l;
4206 13eb76e0 bellard
    }
4207 13eb76e0 bellard
    return 0;
4208 13eb76e0 bellard
}
4209 a68fe89c Paul Brook
#endif
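/* Illustrative sketch, not part of the original file: a debugger stub
   would use cpu_memory_rw_debug() above to peek at guest virtual
   memory; it translates through the MMU via cpu_get_phys_page_debug()
   and may write even to ROM. */
#if 0
static uint32_t example_debug_peek32(CPUArchState *env, target_ulong vaddr)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return 0; /* page not mapped */
    }
    return ldl_p(buf);
}
#endif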
4210 13eb76e0 bellard
4211 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
4212 2e70f6ef pbrook
   must be at the end of the TB */
4213 20503968 Blue Swirl
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
4214 2e70f6ef pbrook
{
4215 2e70f6ef pbrook
    TranslationBlock *tb;
4216 2e70f6ef pbrook
    uint32_t n, cflags;
4217 2e70f6ef pbrook
    target_ulong pc, cs_base;
4218 2e70f6ef pbrook
    uint64_t flags;
4219 2e70f6ef pbrook
4220 20503968 Blue Swirl
    tb = tb_find_pc(retaddr);
4221 2e70f6ef pbrook
    if (!tb) {
4222 2e70f6ef pbrook
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
4223 20503968 Blue Swirl
                  (void *)retaddr);
4224 2e70f6ef pbrook
    }
4225 2e70f6ef pbrook
    n = env->icount_decr.u16.low + tb->icount;
4226 20503968 Blue Swirl
    cpu_restore_state(tb, env, retaddr);
4227 2e70f6ef pbrook
    /* Calculate how many instructions had been executed before the fault
4228 bf20dc07 ths
       occurred.  */
4229 2e70f6ef pbrook
    n = n - env->icount_decr.u16.low;
4230 2e70f6ef pbrook
    /* Generate a new TB ending on the I/O insn.  */
4231 2e70f6ef pbrook
    n++;
4232 2e70f6ef pbrook
    /* On MIPS and SH, delay slot instructions can only be restarted if
4233 2e70f6ef pbrook
       they were already the first instruction in the TB.  If this is not
4234 bf20dc07 ths
       the first instruction in a TB then re-execute the preceding
4235 2e70f6ef pbrook
       branch.  */
4236 2e70f6ef pbrook
#if defined(TARGET_MIPS)
4237 2e70f6ef pbrook
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4238 2e70f6ef pbrook
        env->active_tc.PC -= 4;
4239 2e70f6ef pbrook
        env->icount_decr.u16.low++;
4240 2e70f6ef pbrook
        env->hflags &= ~MIPS_HFLAG_BMASK;
4241 2e70f6ef pbrook
    }
4242 2e70f6ef pbrook
#elif defined(TARGET_SH4)
4243 2e70f6ef pbrook
    if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
4244 2e70f6ef pbrook
            && n > 1) {
4245 2e70f6ef pbrook
        env->pc -= 2;
4246 2e70f6ef pbrook
        env->icount_decr.u16.low++;
4247 2e70f6ef pbrook
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4248 2e70f6ef pbrook
    }
4249 2e70f6ef pbrook
#endif
4250 2e70f6ef pbrook
    /* This should never happen.  */
4251 2e70f6ef pbrook
    if (n > CF_COUNT_MASK)
4252 2e70f6ef pbrook
        cpu_abort(env, "TB too big during recompile");
4253 2e70f6ef pbrook
4254 2e70f6ef pbrook
    cflags = n | CF_LAST_IO;
4255 2e70f6ef pbrook
    pc = tb->pc;
4256 2e70f6ef pbrook
    cs_base = tb->cs_base;
4257 2e70f6ef pbrook
    flags = tb->flags;
4258 2e70f6ef pbrook
    tb_phys_invalidate(tb, -1);
4259 2e70f6ef pbrook
    /* FIXME: In theory this could raise an exception.  In practice
4260 2e70f6ef pbrook
       we have already translated the block once so it's probably ok.  */
4261 2e70f6ef pbrook
    tb_gen_code(env, pc, cs_base, flags, cflags);
4262 bf20dc07 ths
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4263 2e70f6ef pbrook
       the first in the TB) then we end up generating a whole new TB and
4264 2e70f6ef pbrook
       repeating the fault, which is horribly inefficient.
4265 2e70f6ef pbrook
       Better would be to execute just this insn uncached, or generate a
4266 2e70f6ef pbrook
       second new TB.  */
4267 2e70f6ef pbrook
    cpu_resume_from_signal(env, NULL);
4268 2e70f6ef pbrook
}
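/* Worked illustration of the icount arithmetic above (hypothetical numbers):
 * suppose the icount budget at TB entry was 10 and tb->icount = 8, so
 * u16.low was 2 when the I/O insn faulted; n = 2 + 8 = 10.  If 2 insns had
 * actually executed, cpu_restore_state() rewinds u16.low to 10 - 2 = 8, so
 * n = 10 - 8 = 2, and n++ = 3 counts the I/O insn itself.  cflags = 3 |
 * CF_LAST_IO then asks tb_gen_code() for a 3-insn TB whose last insn is the
 * I/O access, so re-execution performs the I/O at a deterministic icount.
 */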
4269 2e70f6ef pbrook
4270 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
4271 b3755a91 Paul Brook
4272 055403b2 Stefan Weil
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4273 e3db7226 bellard
{
4274 e3db7226 bellard
    int i, target_code_size, max_target_code_size;
4275 e3db7226 bellard
    int direct_jmp_count, direct_jmp2_count, cross_page;
4276 e3db7226 bellard
    TranslationBlock *tb;
4277 3b46e624 ths
4278 e3db7226 bellard
    target_code_size = 0;
4279 e3db7226 bellard
    max_target_code_size = 0;
4280 e3db7226 bellard
    cross_page = 0;
4281 e3db7226 bellard
    direct_jmp_count = 0;
4282 e3db7226 bellard
    direct_jmp2_count = 0;
4283 e3db7226 bellard
    for (i = 0; i < nb_tbs; i++) {
4284 e3db7226 bellard
        tb = &tbs[i];
4285 e3db7226 bellard
        target_code_size += tb->size;
4286 e3db7226 bellard
        if (tb->size > max_target_code_size)
4287 e3db7226 bellard
            max_target_code_size = tb->size;
4288 e3db7226 bellard
        if (tb->page_addr[1] != -1)
4289 e3db7226 bellard
            cross_page++;
4290 e3db7226 bellard
        if (tb->tb_next_offset[0] != 0xffff) {
4291 e3db7226 bellard
            direct_jmp_count++;
4292 e3db7226 bellard
            if (tb->tb_next_offset[1] != 0xffff) {
4293 e3db7226 bellard
                direct_jmp2_count++;
4294 e3db7226 bellard
            }
4295 e3db7226 bellard
        }
4296 e3db7226 bellard
    }
4297 e3db7226 bellard
    /* XXX: avoid using doubles? */
4298 57fec1fe bellard
    cpu_fprintf(f, "Translation buffer state:\n");
4299 055403b2 Stefan Weil
    cpu_fprintf(f, "gen code size       %td/%ld\n",
4300 26a5f13b bellard
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4301 26a5f13b bellard
    cpu_fprintf(f, "TB count            %d/%d\n", 
4302 26a5f13b bellard
                nb_tbs, code_gen_max_blocks);
4303 5fafdf24 ths
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
4304 e3db7226 bellard
                nb_tbs ? target_code_size / nb_tbs : 0,
4305 e3db7226 bellard
                max_target_code_size);
4306 055403b2 Stefan Weil
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
4307 e3db7226 bellard
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4308 e3db7226 bellard
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4309 5fafdf24 ths
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4310 5fafdf24 ths
            cross_page,
4311 e3db7226 bellard
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4312 e3db7226 bellard
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
4313 5fafdf24 ths
                direct_jmp_count,
4314 e3db7226 bellard
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4315 e3db7226 bellard
                direct_jmp2_count,
4316 e3db7226 bellard
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4317 57fec1fe bellard
    cpu_fprintf(f, "\nStatistics:\n");
4318 e3db7226 bellard
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
4319 e3db7226 bellard
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4320 e3db7226 bellard
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
4321 b67d9a52 bellard
    tcg_dump_info(f, cpu_fprintf);
4322 e3db7226 bellard
}
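/* Illustrative usage (not part of exec.c): the monitor's "info jit" command
 * is the usual caller, passing its own fprintf-like callback, but any
 * (FILE *, callback) pair matching fprintf_function works, e.g. plain stdio:
 */
#if 0   /* example only, not compiled */
    dump_exec_info(stdout, fprintf);
#endif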
4323 e3db7226 bellard
4324 82afa586 Benjamin Herrenschmidt
/*
4325 82afa586 Benjamin Herrenschmidt
 * A helper function for the _utterly broken_ virtio device model to find out if
4326 82afa586 Benjamin Herrenschmidt
 * it's running on a big-endian machine. Don't do this at home, kids!
4327 82afa586 Benjamin Herrenschmidt
 */
4328 82afa586 Benjamin Herrenschmidt
bool virtio_is_big_endian(void);
4329 82afa586 Benjamin Herrenschmidt
bool virtio_is_big_endian(void)
4330 82afa586 Benjamin Herrenschmidt
{
4331 82afa586 Benjamin Herrenschmidt
#if defined(TARGET_WORDS_BIGENDIAN)
4332 82afa586 Benjamin Herrenschmidt
    return true;
4333 82afa586 Benjamin Herrenschmidt
#else
4334 82afa586 Benjamin Herrenschmidt
    return false;
4335 82afa586 Benjamin Herrenschmidt
#endif
4336 82afa586 Benjamin Herrenschmidt
}
4337 82afa586 Benjamin Herrenschmidt
4338 61382a50 bellard
#endif
4339 76f35538 Wen Congyang
4340 76f35538 Wen Congyang
#ifndef CONFIG_USER_ONLY
4341 76f35538 Wen Congyang
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4342 76f35538 Wen Congyang
{
4343 76f35538 Wen Congyang
    MemoryRegionSection *section;
4344 76f35538 Wen Congyang
4345 76f35538 Wen Congyang
    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
4346 76f35538 Wen Congyang
4347 76f35538 Wen Congyang
    return !(memory_region_is_ram(section->mr) ||
4348 76f35538 Wen Congyang
             memory_region_is_romd(section->mr));
4349 76f35538 Wen Congyang
}
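/* Illustrative sketch (not part of exec.c): a dump-style caller skipping I/O
 * regions.  Reading device MMIO through the normal rw path could trigger
 * side effects, so e.g. a crash-dump writer checks first.  The helper name
 * here is hypothetical.
 */
#if 0   /* example only, not compiled */
static bool example_page_is_dumpable(target_phys_addr_t paddr)
{
    return !cpu_physical_memory_is_io(paddr & TARGET_PAGE_MASK);
}
#endif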
4350 76f35538 Wen Congyang
#endif