Statistics
| Branch: | Revision:

root / exec.c @ 80465e80

History | View | Annotate | Download (134.4 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
27 055403b2 Stefan Weil
#include "qemu-common.h"
28 6180a181 bellard
#include "cpu.h"
29 b67d9a52 bellard
#include "tcg.h"
30 b3c7724c pbrook
#include "hw/hw.h"
31 cc9e98cb Alex Williamson
#include "hw/qdev.h"
32 74576198 aliguori
#include "osdep.h"
33 7ba1e619 aliguori
#include "kvm.h"
34 432d268c Jun Nakajima
#include "hw/xen.h"
35 29e922b6 Blue Swirl
#include "qemu-timer.h"
36 62152b8a Avi Kivity
#include "memory.h"
37 62152b8a Avi Kivity
#include "exec-memory.h"
38 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
39 53a5960a pbrook
#include <qemu.h>
40 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 f01576f1 Juergen Lock
#include <sys/param.h>
42 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
43 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
44 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
45 f01576f1 Juergen Lock
#include <sys/time.h>
46 f01576f1 Juergen Lock
#include <sys/proc.h>
47 f01576f1 Juergen Lock
#include <machine/profile.h>
48 f01576f1 Juergen Lock
#define _KERNEL
49 f01576f1 Juergen Lock
#include <sys/user.h>
50 f01576f1 Juergen Lock
#undef _KERNEL
51 f01576f1 Juergen Lock
#undef sigqueue
52 f01576f1 Juergen Lock
#include <libutil.h>
53 f01576f1 Juergen Lock
#endif
54 f01576f1 Juergen Lock
#endif
55 432d268c Jun Nakajima
#else /* !CONFIG_USER_ONLY */
56 432d268c Jun Nakajima
#include "xen-mapcache.h"
57 6506e4f9 Stefano Stabellini
#include "trace.h"
58 53a5960a pbrook
#endif
59 54936004 bellard
60 67d95c15 Avi Kivity
#define WANT_EXEC_OBSOLETE
61 67d95c15 Avi Kivity
#include "exec-obsolete.h"
62 67d95c15 Avi Kivity
63 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
64 66e85a21 bellard
//#define DEBUG_FLUSH
65 9fa3e853 bellard
//#define DEBUG_TLB
66 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
67 fd6ce8f6 bellard
68 fd6ce8f6 bellard
/* make various TB consistency checks */
69 5fafdf24 ths
//#define DEBUG_TB_CHECK
70 5fafdf24 ths
//#define DEBUG_TLB_CHECK
71 fd6ce8f6 bellard
72 1196be37 ths
//#define DEBUG_IOPORT
73 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
74 1196be37 ths
75 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
76 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
77 99773bd4 pbrook
#undef DEBUG_TB_CHECK
78 99773bd4 pbrook
#endif
79 99773bd4 pbrook
80 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
81 9fa3e853 bellard
82 bdaf78e0 blueswir1
static TranslationBlock *tbs;
83 24ab68ac Stefan Weil
static int code_gen_max_blocks;
84 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 bdaf78e0 blueswir1
static int nb_tbs;
86 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
87 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88 fd6ce8f6 bellard
89 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
90 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
92 d03d860b blueswir1
 section close to code segment. */
93 d03d860b blueswir1
#define code_gen_section                                \
94 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
95 d03d860b blueswir1
    __attribute__((aligned (32)))
96 6840981d Stefan Weil
#elif defined(_WIN32) && !defined(_WIN64)
97 f8e2af11 Stefan Weil
#define code_gen_section                                \
98 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
99 d03d860b blueswir1
#else
100 d03d860b blueswir1
#define code_gen_section                                \
101 d03d860b blueswir1
    __attribute__((aligned (32)))
102 d03d860b blueswir1
#endif
103 d03d860b blueswir1
104 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
105 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
106 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
107 26a5f13b bellard
/* threshold to flush the translated code buffer */
108 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
109 24ab68ac Stefan Weil
static uint8_t *code_gen_ptr;
110 fd6ce8f6 bellard
111 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
112 9fa3e853 bellard
int phys_ram_fd;
113 74576198 aliguori
static int in_migration;
114 94a6b54f pbrook
115 85d59fef Paolo Bonzini
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
116 62152b8a Avi Kivity
117 62152b8a Avi Kivity
static MemoryRegion *system_memory;
118 309cb471 Avi Kivity
static MemoryRegion *system_io;
119 62152b8a Avi Kivity
120 0e0df1e2 Avi Kivity
MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
121 de712f94 Avi Kivity
static MemoryRegion io_mem_subpage_ram;
122 0e0df1e2 Avi Kivity
123 e2eef170 pbrook
#endif
124 9fa3e853 bellard
125 9349b4f9 Andreas Färber
CPUArchState *first_cpu;
126 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
127 6a00d601 bellard
   cpu_exec() */
128 9349b4f9 Andreas Färber
DEFINE_TLS(CPUArchState *,cpu_single_env);
129 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
130 bf20dc07 ths
   1 = Precise instruction counting.
131 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
132 2e70f6ef pbrook
int use_icount = 0;
133 6a00d601 bellard
134 54936004 bellard
/* Per-guest-page bookkeeping for translated-code management.
   One PageDesc exists (lazily, via page_find_alloc()) for every guest
   page that the translator has touched. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* NOTE(review): presumably a bitmap of code-containing regions,
       built once code_write_count passes SMC_BITMAP_USE_THRESHOLD --
       the builder is outside this chunk, confirm against it. */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* page protection/state flags (user-mode emulation only) */
    unsigned long flags;
#endif
} PageDesc;
145 54936004 bellard
146 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
147 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
148 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
149 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
150 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
151 41c1b1c9 Paul Brook
#else
152 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
153 41c1b1c9 Paul Brook
#endif
154 bedb69ea j_mayer
#else
155 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
156 bedb69ea j_mayer
#endif
157 54936004 bellard
158 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
159 5cd2c5b6 Richard Henderson
#define L2_BITS 10
160 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
161 54936004 bellard
162 3eef53df Avi Kivity
#define P_L2_LEVELS \
163 3eef53df Avi Kivity
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
164 3eef53df Avi Kivity
165 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
166 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
167 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168 5cd2c5b6 Richard Henderson
169 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
170 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
171 5cd2c5b6 Richard Henderson
#else
172 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
173 5cd2c5b6 Richard Henderson
#endif
174 5cd2c5b6 Richard Henderson
175 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
176 5cd2c5b6 Richard Henderson
177 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
178 5cd2c5b6 Richard Henderson
179 c6d50674 Stefan Weil
uintptr_t qemu_real_host_page_size;
180 c6d50674 Stefan Weil
uintptr_t qemu_host_page_size;
181 c6d50674 Stefan Weil
uintptr_t qemu_host_page_mask;
182 54936004 bellard
183 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
184 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
185 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
186 54936004 bellard
187 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
188 4346ae3e Avi Kivity
typedef struct PhysPageEntry PhysPageEntry;
189 4346ae3e Avi Kivity
190 5312bd8b Avi Kivity
static MemoryRegionSection *phys_sections;
191 5312bd8b Avi Kivity
static unsigned phys_sections_nb, phys_sections_nb_alloc;
192 5312bd8b Avi Kivity
static uint16_t phys_section_unassigned;
193 aa102231 Avi Kivity
static uint16_t phys_section_notdirty;
194 aa102231 Avi Kivity
static uint16_t phys_section_rom;
195 aa102231 Avi Kivity
static uint16_t phys_section_watch;
196 5312bd8b Avi Kivity
197 4346ae3e Avi Kivity
/* One entry of the multi-level physical page map.  An entry is either
   an interior node (ptr indexes phys_map_nodes) or a leaf
   (ptr indexes phys_sections); PHYS_MAP_NODE_NIL marks an
   unpopulated interior slot. */
struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
202 4346ae3e Avi Kivity
203 d6f2ea22 Avi Kivity
/* Simple allocator for PhysPageEntry nodes */
204 d6f2ea22 Avi Kivity
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
205 d6f2ea22 Avi Kivity
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
206 d6f2ea22 Avi Kivity
207 07f07b31 Avi Kivity
#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
208 d6f2ea22 Avi Kivity
209 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
210 06ef3525 Avi Kivity
   The bottom level has pointers to MemoryRegionSections.  */
211 07f07b31 Avi Kivity
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
212 6d9a1304 Paul Brook
213 e2eef170 pbrook
static void io_mem_init(void);
214 62152b8a Avi Kivity
static void memory_map_init(void);
215 e2eef170 pbrook
216 1ec9b909 Avi Kivity
static MemoryRegion io_mem_watch;
217 6658ffb8 pbrook
#endif
218 33417e70 bellard
219 34865134 bellard
/* log support */
220 1e8b27ca Juha Riihimäki
#ifdef WIN32
221 1e8b27ca Juha Riihimäki
static const char *logfilename = "qemu.log";
222 1e8b27ca Juha Riihimäki
#else
223 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
224 1e8b27ca Juha Riihimäki
#endif
225 34865134 bellard
FILE *logfile;
226 34865134 bellard
int loglevel;
227 e735b91c pbrook
static int log_append = 0;
228 34865134 bellard
229 e3db7226 bellard
/* statistics */
230 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
231 e3db7226 bellard
static int tlb_flush_count;
232 b3755a91 Paul Brook
#endif
233 e3db7226 bellard
static int tb_flush_count;
234 e3db7226 bellard
static int tb_phys_invalidate_count;
235 e3db7226 bellard
236 7cb69cae bellard
#ifdef _WIN32
/* Make the memory range [addr, addr + size) executable (Win32).
   Note: the VirtualProtect() result is not checked; if the change
   fails, a later jump into the region will fault instead. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make the memory range [addr, addr + size) executable (POSIX).
   mprotect() works on whole pages, so the range is first widened
   outwards to host page boundaries. */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long first = (unsigned long)addr & ~(page_size - 1);
    unsigned long last = ((unsigned long)addr + size + page_size - 1)
                         & ~(page_size - 1);

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
261 7cb69cae bellard
262 b346ff46 bellard
/* One-time page-handling initialization: determine the host page size
   and derive qemu_host_page_size/mask from it.  On BSD user-mode
   emulation, additionally walk the host's existing mappings and mark
   them PAGE_RESERVED so the guest cannot be given those addresses. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been preset (e.g. by a command-line
       option handled elsewhere); only default it when still zero, and
       never let it drop below the target page size. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 700104: query the mappings via kinfo_getvmmap(). */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* End of the mapping lies outside the guest
                           address space: reserve up to the top, but only
                           when the guest space fits in the map. */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file line by line. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
347 54936004 bellard
348 41c1b1c9 Paul Brook
/* Return the PageDesc for guest page @index by walking the multi-level
   l1_map.  When @alloc is non-zero, missing intermediate tables and the
   final PageDesc array are allocated on demand; when it is zero, NULL
   is returned as soon as any level is unpopulated. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  Each level is a table of L2_SIZE void* slots;
       descend one level per iteration, allocating as needed. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of L2_SIZE PageDesc entries. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
397 54936004 bellard
398 41c1b1c9 Paul Brook
/* Look up the PageDesc for guest page @index without allocating;
   returns NULL if no descriptor exists yet for that page. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
402 fd6ce8f6 bellard
403 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
404 d6f2ea22 Avi Kivity
405 f7bf5461 Avi Kivity
static void phys_map_node_reserve(unsigned nodes)
406 d6f2ea22 Avi Kivity
{
407 f7bf5461 Avi Kivity
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
408 d6f2ea22 Avi Kivity
        typedef PhysPageEntry Node[L2_SIZE];
409 d6f2ea22 Avi Kivity
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
410 f7bf5461 Avi Kivity
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
411 f7bf5461 Avi Kivity
                                      phys_map_nodes_nb + nodes);
412 d6f2ea22 Avi Kivity
        phys_map_nodes = g_renew(Node, phys_map_nodes,
413 d6f2ea22 Avi Kivity
                                 phys_map_nodes_nb_alloc);
414 d6f2ea22 Avi Kivity
    }
415 f7bf5461 Avi Kivity
}
416 f7bf5461 Avi Kivity
417 f7bf5461 Avi Kivity
static uint16_t phys_map_node_alloc(void)
418 f7bf5461 Avi Kivity
{
419 f7bf5461 Avi Kivity
    unsigned i;
420 f7bf5461 Avi Kivity
    uint16_t ret;
421 f7bf5461 Avi Kivity
422 f7bf5461 Avi Kivity
    ret = phys_map_nodes_nb++;
423 f7bf5461 Avi Kivity
    assert(ret != PHYS_MAP_NODE_NIL);
424 f7bf5461 Avi Kivity
    assert(ret != phys_map_nodes_nb_alloc);
425 d6f2ea22 Avi Kivity
    for (i = 0; i < L2_SIZE; ++i) {
426 07f07b31 Avi Kivity
        phys_map_nodes[ret][i].is_leaf = 0;
427 c19e8800 Avi Kivity
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
428 d6f2ea22 Avi Kivity
    }
429 f7bf5461 Avi Kivity
    return ret;
430 d6f2ea22 Avi Kivity
}
431 d6f2ea22 Avi Kivity
432 d6f2ea22 Avi Kivity
/* Discard all allocated phys map nodes; the backing array itself is
   kept (phys_map_nodes_nb_alloc is untouched) so it can be reused the
   next time the physical page table is populated. */
static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
436 d6f2ea22 Avi Kivity
437 92e873b9 bellard
438 2999097b Avi Kivity
/* Recursively populate the physical page map: map the page range
   [*index, *index + *nb) to section number @leaf.  @level is the tree
   level handled by this call.  When the remaining range is aligned to
   and at least as large as this level's step, the whole chunk is
   recorded as a single leaf here; otherwise we recurse one level down.
   *index and *nb are advanced in place as pages are consumed. */
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    /* Number of pages covered by one entry at this level. */
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        /* Unpopulated interior slot: allocate a child node.  At the
           bottom level, pre-fill every entry as an unassigned leaf. */
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    /* First child entry covering *index at this level. */
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* Aligned and big enough: store the whole chunk here. */
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
472 f7bf5461 Avi Kivity
473 2999097b Avi Kivity
/* Map @nb physical pages starting at page number @index to the
   phys_sections entry @leaf, growing the node pool first. */
static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
481 92e873b9 bellard
482 f3705d53 Avi Kivity
/* Look up the MemoryRegionSection covering physical page @index by
   walking the phys_map tree from the top level down.  Any hole in the
   tree yields the "unassigned" section; this never returns NULL. */
static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    int level;

    for (level = P_L2_LEVELS - 1; level >= 0 && !lp.is_leaf; level--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            /* Unpopulated subtree: the page is unmapped. */
            return &phys_sections[phys_section_unassigned];
        }
        lp = phys_map_nodes[lp.ptr]
                           [(index >> (level * L2_BITS)) & (L2_SIZE - 1)];
    }

    /* Reached a leaf (or ran out of levels): lp.ptr is the section. */
    return &phys_sections[lp.ptr];
}
501 f3705d53 Avi Kivity
502 f3705d53 Avi Kivity
/* Convert an absolute address-space address into the corresponding
   offset inside @section's underlying memory region. */
static target_phys_addr_t section_addr(MemoryRegionSection *section,
                                       target_phys_addr_t addr)
{
    target_phys_addr_t region_offset;

    region_offset = addr - section->offset_within_address_space
                    + section->offset_within_region;
    return region_offset;
}
509 92e873b9 bellard
510 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
511 9349b4f9 Andreas Färber
static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
512 3a7d929e bellard
                                    target_ulong vaddr);
513 c8a706fe pbrook
#define mmap_lock() do { } while(0)
514 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
515 9fa3e853 bellard
#endif
516 fd6ce8f6 bellard
517 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
518 4369415f bellard
519 4369415f bellard
#if defined(CONFIG_USER_ONLY)
520 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
521 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
522 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
523 4369415f bellard
#endif
524 4369415f bellard
525 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
526 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
527 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
528 4369415f bellard
#endif
529 4369415f bellard
530 8fcd3692 blueswir1
/* Allocate the translated-code buffer (code_gen_buffer) of @tb_size
   bytes (0 selects a platform default), make it executable, and size
   the TranslationBlock array accordingly.  The buffer placement is
   host-CPU/OS specific because some hosts need the generated code to
   be reachable with direct branches from the TCG prologue. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User-mode: use a statically allocated buffer of the default size
       and just flip its protection to executable. */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Force the buffer into the low 4GB so 32-bit displacements work. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Other hosts: plain allocation plus an mprotect-style fixup. */
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Keep headroom so one maximally-sized TB can always be emitted
       past the flush threshold. */
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
629 26a5f13b bellard
630 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
631 26a5f13b bellard
   (in bytes) allocated to the translation buffer. Zero means default
632 26a5f13b bellard
   size. */
633 d5ab9713 Jan Kiszka
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    /* Allocate (or statically map) the translation buffer;
       tb_size == 0 selects the default size.  */
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    /* Let the JIT debug interface (e.g. GDB) know about the buffer.  */
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
646 26a5f13b bellard
647 d5ab9713 Jan Kiszka
bool tcg_enabled(void)
{
    /* TCG is usable once tcg_exec_init() has allocated the buffer.  */
    return code_gen_buffer != NULL;
}
651 d5ab9713 Jan Kiszka
652 d5ab9713 Jan Kiszka
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    /* System emulation only: set up the memory map and the
       I/O dispatch tables before any CPU runs.  */
    memory_map_init();
    io_mem_init();
#endif
}
659 d5ab9713 Jan Kiszka
660 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
661 9656f324 pbrook
662 e59fb374 Juan Quintela
/* Fix up common CPU state after an incoming migration/loadvm.  */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    /* Discard any cached translations; they are rebuilt on demand.  */
    tlb_flush(env, 1);

    return 0;
}
673 e7f4eff7 Juan Quintela
674 e7f4eff7 Juan Quintela
/* Migration description for the CPU state shared by all targets;
   target-specific state is registered separately (see cpu_exec_init).  */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
686 9656f324 pbrook
#endif
687 9656f324 pbrook
688 9349b4f9 Andreas Färber
CPUArchState *qemu_get_cpu(int cpu)
689 950f1472 Glauber Costa
{
690 9349b4f9 Andreas Färber
    CPUArchState *env = first_cpu;
691 950f1472 Glauber Costa
692 950f1472 Glauber Costa
    while (env) {
693 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
694 950f1472 Glauber Costa
            break;
695 950f1472 Glauber Costa
        env = env->next_cpu;
696 950f1472 Glauber Costa
    }
697 950f1472 Glauber Costa
698 950f1472 Glauber Costa
    return env;
699 950f1472 Glauber Costa
}
700 950f1472 Glauber Costa
701 9349b4f9 Andreas Färber
void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    /* Append env to the tail of the global CPU list, counting
       existing CPUs so the new one gets the next free index.  */
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Register both the common and the target-specific CPU state
       for savevm/migration, keyed by the CPU index.  */
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
733 fd6ce8f6 bellard
734 d1a1eb74 Tristan Gingold
/* Allocate a new translation block. Flush the translation buffer if
735 d1a1eb74 Tristan Gingold
   too many translation blocks or too much generated code. */
736 d1a1eb74 Tristan Gingold
/* Hand out the next TranslationBlock descriptor, or NULL when either
   the TB array or the code buffer is exhausted (caller must then
   tb_flush() and retry).  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *new_tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) {
        return NULL;
    }
    new_tb = &tbs[nb_tbs++];
    new_tb->pc = pc;
    new_tb->cflags = 0;
    return new_tb;
}
748 d1a1eb74 Tristan Gingold
749 d1a1eb74 Tristan Gingold
/* Best-effort reclaim: only the most recently allocated TB can be
   returned to the buffer; anything else is simply left until the
   next tb_flush().  */
void tb_free(TranslationBlock *tb)
{
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        nb_tbs--;
        code_gen_ptr = tb->tc_ptr;
    }
}
759 d1a1eb74 Tristan Gingold
760 9fa3e853 bellard
/* Drop a page's SMC code bitmap and reset its write counter.  */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    /* g_free(NULL) is a no-op, so no guard is needed.  */
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
}
768 9fa3e853 bellard
769 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
770 5cd2c5b6 Richard Henderson
771 5cd2c5b6 Richard Henderson
static void page_flush_tb_1 (int level, void **lp)
772 fd6ce8f6 bellard
{
773 5cd2c5b6 Richard Henderson
    int i;
774 fd6ce8f6 bellard
775 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
776 5cd2c5b6 Richard Henderson
        return;
777 5cd2c5b6 Richard Henderson
    }
778 5cd2c5b6 Richard Henderson
    if (level == 0) {
779 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
780 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
781 5cd2c5b6 Richard Henderson
            pd[i].first_tb = NULL;
782 5cd2c5b6 Richard Henderson
            invalidate_page_bitmap(pd + i);
783 fd6ce8f6 bellard
        }
784 5cd2c5b6 Richard Henderson
    } else {
785 5cd2c5b6 Richard Henderson
        void **pp = *lp;
786 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
787 5cd2c5b6 Richard Henderson
            page_flush_tb_1 (level - 1, pp + i);
788 5cd2c5b6 Richard Henderson
        }
789 5cd2c5b6 Richard Henderson
    }
790 5cd2c5b6 Richard Henderson
}
791 5cd2c5b6 Richard Henderson
792 5cd2c5b6 Richard Henderson
static void page_flush_tb(void)
793 5cd2c5b6 Richard Henderson
{
794 5cd2c5b6 Richard Henderson
    int i;
795 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
796 5cd2c5b6 Richard Henderson
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
797 fd6ce8f6 bellard
    }
798 fd6ce8f6 bellard
}
799 fd6ce8f6 bellard
800 fd6ce8f6 bellard
/* flush all the translation blocks */
801 d4e8164f bellard
/* XXX: tb_flush is currently not thread safe */
802 9349b4f9 Andreas Färber
/* Throw away every translation: reset the TB array, every CPU's jump
   cache, the physical hash table and all per-page TB lists, then
   rewind the code buffer.  XXX: not thread safe.  */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
828 fd6ce8f6 bellard
829 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
830 fd6ce8f6 bellard
831 bc98a7ef j_mayer
/* Debug check: report any TB that still overlaps the page containing
   'address' after an invalidation.  */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int bucket;

    address &= TARGET_PAGE_MASK;
    for (bucket = 0; bucket < CODE_GEN_PHYS_HASH_SIZE; bucket++) {
        for (tb = tb_phys_hash[bucket]; tb != NULL; tb = tb->phys_hash_next) {
            /* overlap iff the page does not lie wholly before or after
               the TB's [pc, pc + size) range */
            if (address + TARGET_PAGE_SIZE > tb->pc &&
                address < tb->pc + tb->size) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
847 fd6ce8f6 bellard
848 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
849 fd6ce8f6 bellard
static void tb_page_check(void)
850 fd6ce8f6 bellard
{
851 fd6ce8f6 bellard
    TranslationBlock *tb;
852 fd6ce8f6 bellard
    int i, flags1, flags2;
853 3b46e624 ths
854 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
855 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
856 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
857 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
858 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
859 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
860 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
861 fd6ce8f6 bellard
            }
862 fd6ce8f6 bellard
        }
863 fd6ce8f6 bellard
    }
864 fd6ce8f6 bellard
}
865 fd6ce8f6 bellard
866 fd6ce8f6 bellard
#endif
867 fd6ce8f6 bellard
868 fd6ce8f6 bellard
/* invalidate one TB */
869 fd6ce8f6 bellard
/* Unlink 'tb' from a singly linked list whose "next" pointer lives at
   byte offset 'next_offset' inside each TranslationBlock.  The TB is
   assumed to be present on the list.  */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    while (*ptb != tb) {
        ptb = (TranslationBlock **)((char *)*ptb + next_offset);
    }
    *ptb = *(TranslationBlock **)((char *)tb + next_offset);
}
882 fd6ce8f6 bellard
883 9fa3e853 bellard
/* Unlink 'tb' from a per-page TB list.  page_next pointers carry the
   page slot index (0 or 1) in their two low tag bits, which must be
   masked off before dereferencing.  */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *cur;
    unsigned int slot;

    for (;;) {
        cur = *ptb;
        slot = (uintptr_t)cur & 3;
        cur = (TranslationBlock *)((uintptr_t)cur & ~3);
        if (cur == tb) {
            *ptb = cur->page_next[slot];
            return;
        }
        ptb = &cur->page_next[slot];
    }
}
899 9fa3e853 bellard
900 d4e8164f bellard
/* Remove tb's outgoing jump 'n' from the circular list of TBs that
   jump into the target TB.  The low 2 bits of each link pointer tag
   which slot it refers to: 0/1 = jmp_next[n1] of the pointed-to TB,
   2 = the list head kept in the target TB's jmp_first.  */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the list head stored in the target TB */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
927 d4e8164f bellard
928 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
929 d4e8164f bellard
   another TB */
930 d4e8164f bellard
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* Retarget jump 'n' to the address tb_next_offset[n] bytes into
       this TB's own generated code, i.e. unchain it from any other TB.  */
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
934 d4e8164f bellard
935 41c1b1c9 Paul Brook
/* Invalidate one TB: unlink it from the physical hash table, the
   per-page TB lists, every CPU's virtual-PC jump cache, and both
   directions of the TB chaining lists.  'page_addr' names a page the
   caller is already handling itself (-1 for none).  */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* second page only exists (!= -1) when the TB spans two pages */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's virtual-PC jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        /* low 2 bits tag the jump slot; 2 marks the end of the list */
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
990 9fa3e853 bellard
991 9fa3e853 bellard
/* Set 'len' consecutive bits of the bitmap 'tab' starting at bit
   index 'start'.  Bit k lives in byte k>>3, bit position k&7.  */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* the whole range lies inside a single byte */
        if (start < end) {
            *p |= head_mask & ~(0xff << (end & 7));
        }
    } else {
        int pos;

        /* partial leading byte */
        *p++ |= head_mask;
        /* full bytes in the middle */
        for (pos = (start + 8) & ~7; pos < (end & ~7); pos += 8) {
            *p++ = 0xff;
        }
        /* partial trailing byte, if any */
        if (pos < end) {
            *p |= ~(0xff << (end & 7));
        }
    }
}
1017 9fa3e853 bellard
1018 9fa3e853 bellard
/* Build the SMC code bitmap for a page: one bit per byte of the page,
   set when that byte is covered by translated code.  Walks the page's
   TB list, whose low pointer bits tag whether the entry refers to the
   first (0) or second (1) page of the TB.  */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: covered from the page
               start up to the TB's end offset within this page */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
1045 9fa3e853 bellard
1046 9349b4f9 Andreas Färber
/* Translate one guest block starting at 'pc' and link the resulting
   TB into the lookup structures.  Flushes all translations first if
   the buffer is full.  */
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* bump the generation pointer, keeping it CODE_GEN_ALIGN aligned */
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
1084 3b46e624 ths
1085 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
1086 9fa3e853 bellard
   starting in range [start;end[. NOTE: start and end must refer to
1087 d720b93d bellard
   the same physical page. 'is_cpu_write_access' should be true if called
1088 d720b93d bellard
   from a real cpu write access: the virtual CPU will exit the current
1089 d720b93d bellard
   TB if code is modified inside this TB. */
1090 41c1b1c9 Paul Brook
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on a page that still holds code,
       switch to per-byte bitmap tracking to avoid full invalidations */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits tag which page (0/1) of the TB this entry is for */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1194 fd6ce8f6 bellard
1195 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1196 41c1b1c9 Paul Brook
/* Fast path for a guest write of 'len' bytes (len <= 8, start aligned
   to len): consult the page's code bitmap when one exists and skip the
   full invalidation if no translated code overlaps the write.  */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (!(b & ((1 << len) - 1))) {
            /* no translated code in the written range */
            return;
        }
    }
    tb_invalidate_phys_page_range(start, start + len, 1);
}
1222 9fa3e853 bellard
1223 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB that overlaps the page containing 'addr'.
   User-mode only: presumably reached from a write-protection fault
   ('pc'/'puc' describe the faulting context — cpu_resume_from_signal()
   is used to restart).  TODO confirm the caller is the SIGSEGV path. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    /* walk the page's TB list; the low 2 bits of each link encode which
       of the TB's two possible pages the link belongs to */
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
1283 fd6ce8f6 bellard
1284 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    /* link the TB into the page's TB list; the low bits of the stored
       pointer carry 'n' so the walker knows which page_next[] slot to
       follow for this TB */
    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may span several target pages: collect the union
           of their protections and clear PAGE_WRITE on all of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1341 fd6ce8f6 bellard
1342 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* no incoming chained jumps yet: jmp_first points back at the TB
       itself, tagged with 2 to mark the list head (cf. the n1 == 2
       check in tb_reset_jump_recursive2) */
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1381 fd6ce8f6 bellard
1382 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1383 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1384 6375e09e Stefan Weil
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1385 fd6ce8f6 bellard
{
1386 9fa3e853 bellard
    int m_min, m_max, m;
1387 8efe0ca8 Stefan Weil
    uintptr_t v;
1388 9fa3e853 bellard
    TranslationBlock *tb;
1389 a513fe19 bellard
1390 a513fe19 bellard
    if (nb_tbs <= 0)
1391 a513fe19 bellard
        return NULL;
1392 8efe0ca8 Stefan Weil
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
1393 8efe0ca8 Stefan Weil
        tc_ptr >= (uintptr_t)code_gen_ptr) {
1394 a513fe19 bellard
        return NULL;
1395 8efe0ca8 Stefan Weil
    }
1396 a513fe19 bellard
    /* binary search (cf Knuth) */
1397 a513fe19 bellard
    m_min = 0;
1398 a513fe19 bellard
    m_max = nb_tbs - 1;
1399 a513fe19 bellard
    while (m_min <= m_max) {
1400 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1401 a513fe19 bellard
        tb = &tbs[m];
1402 8efe0ca8 Stefan Weil
        v = (uintptr_t)tb->tc_ptr;
1403 a513fe19 bellard
        if (v == tc_ptr)
1404 a513fe19 bellard
            return tb;
1405 a513fe19 bellard
        else if (tc_ptr < v) {
1406 a513fe19 bellard
            m_max = m - 1;
1407 a513fe19 bellard
        } else {
1408 a513fe19 bellard
            m_min = m + 1;
1409 a513fe19 bellard
        }
1410 5fafdf24 ths
    }
1411 a513fe19 bellard
    return &tbs[m_max];
1412 a513fe19 bellard
}
1413 7501267e bellard
1414 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Detach TB 'tb' from its chained jump in slot 'n' (0 or 1).  Jump
   lists use tagged pointers: the low two bits of each link hold the
   slot number inside the next TB, and the tag value 2 marks the head
   of the circular list. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

/* Break both outgoing chained jumps of 'tb', recursing into the TBs it
   pointed to so that an entire chain is unlinked. */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
1460 ea041c0e bellard
1461 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1462 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1463 9349b4f9 Andreas Färber
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1464 94df27fd Paul Brook
{
1465 94df27fd Paul Brook
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1466 94df27fd Paul Brook
}
1467 94df27fd Paul Brook
#else
1468 1e7855a5 Max Filippov
void tb_invalidate_phys_addr(target_phys_addr_t addr)
1469 d720b93d bellard
{
1470 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1471 f3705d53 Avi Kivity
    MemoryRegionSection *section;
1472 d720b93d bellard
1473 06ef3525 Avi Kivity
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
1474 f3705d53 Avi Kivity
    if (!(memory_region_is_ram(section->mr)
1475 f3705d53 Avi Kivity
          || (section->mr->rom_device && section->mr->readable))) {
1476 06ef3525 Avi Kivity
        return;
1477 06ef3525 Avi Kivity
    }
1478 f3705d53 Avi Kivity
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1479 f3705d53 Avi Kivity
        + section_addr(section, addr);
1480 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1481 d720b93d bellard
}
1482 1e7855a5 Max Filippov
1483 1e7855a5 Max Filippov
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1484 1e7855a5 Max Filippov
{
1485 1e7855a5 Max Filippov
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
1486 1e7855a5 Max Filippov
}
1487 c27004ec bellard
#endif
1488 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1489 d720b93d bellard
1490 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
/* Watchpoints are not supported in user-mode emulation: removal is a
   no-op and insertion reports the feature as unavailable. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1501 c527ee8f Paul Brook
#else
1502 6658ffb8 pbrook
/* Add a watchpoint.  */
1503 9349b4f9 Andreas Färber
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
1504 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1505 6658ffb8 pbrook
{
1506 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1507 c0ce998e aliguori
    CPUWatchpoint *wp;
1508 6658ffb8 pbrook
1509 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1510 0dc23828 Max Filippov
    if ((len & (len - 1)) || (addr & ~len_mask) ||
1511 0dc23828 Max Filippov
            len == 0 || len > TARGET_PAGE_SIZE) {
1512 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1513 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1514 b4051334 aliguori
        return -EINVAL;
1515 b4051334 aliguori
    }
1516 7267c094 Anthony Liguori
    wp = g_malloc(sizeof(*wp));
1517 a1d1bb31 aliguori
1518 a1d1bb31 aliguori
    wp->vaddr = addr;
1519 b4051334 aliguori
    wp->len_mask = len_mask;
1520 a1d1bb31 aliguori
    wp->flags = flags;
1521 a1d1bb31 aliguori
1522 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1523 c0ce998e aliguori
    if (flags & BP_GDB)
1524 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1525 c0ce998e aliguori
    else
1526 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1527 6658ffb8 pbrook
1528 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1529 a1d1bb31 aliguori
1530 a1d1bb31 aliguori
    if (watchpoint)
1531 a1d1bb31 aliguori
        *watchpoint = wp;
1532 a1d1bb31 aliguori
    return 0;
1533 6658ffb8 pbrook
}
1534 6658ffb8 pbrook
1535 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1536 9349b4f9 Andreas Färber
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
1537 a1d1bb31 aliguori
                          int flags)
1538 6658ffb8 pbrook
{
1539 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1540 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1541 6658ffb8 pbrook
1542 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1543 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1544 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1545 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1546 6658ffb8 pbrook
            return 0;
1547 6658ffb8 pbrook
        }
1548 6658ffb8 pbrook
    }
1549 a1d1bb31 aliguori
    return -ENOENT;
1550 6658ffb8 pbrook
}
1551 6658ffb8 pbrook
1552 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    /* flush the TLB entry for the watched address so it is rebuilt
       without this watchpoint */
    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
1561 a1d1bb31 aliguori
1562 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1563 9349b4f9 Andreas Färber
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1564 a1d1bb31 aliguori
{
1565 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1566 a1d1bb31 aliguori
1567 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1568 a1d1bb31 aliguori
        if (wp->flags & mask)
1569 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1570 c0ce998e aliguori
    }
1571 7d03f82f edgar_igl
}
1572 c527ee8f Paul Brook
#endif
1573 7d03f82f edgar_igl
1574 a1d1bb31 aliguori
/* Add a breakpoint.  */
1575 9349b4f9 Andreas Färber
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
1576 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1577 4c3a88a2 bellard
{
1578 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1579 c0ce998e aliguori
    CPUBreakpoint *bp;
1580 3b46e624 ths
1581 7267c094 Anthony Liguori
    bp = g_malloc(sizeof(*bp));
1582 4c3a88a2 bellard
1583 a1d1bb31 aliguori
    bp->pc = pc;
1584 a1d1bb31 aliguori
    bp->flags = flags;
1585 a1d1bb31 aliguori
1586 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1587 c0ce998e aliguori
    if (flags & BP_GDB)
1588 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1589 c0ce998e aliguori
    else
1590 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1591 3b46e624 ths
1592 d720b93d bellard
    breakpoint_invalidate(env, pc);
1593 a1d1bb31 aliguori
1594 a1d1bb31 aliguori
    if (breakpoint)
1595 a1d1bb31 aliguori
        *breakpoint = bp;
1596 4c3a88a2 bellard
    return 0;
1597 4c3a88a2 bellard
#else
1598 a1d1bb31 aliguori
    return -ENOSYS;
1599 4c3a88a2 bellard
#endif
1600 4c3a88a2 bellard
}
1601 4c3a88a2 bellard
1602 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1603 9349b4f9 Andreas Färber
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
1604 a1d1bb31 aliguori
{
1605 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1606 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1607 a1d1bb31 aliguori
1608 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1609 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1610 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1611 a1d1bb31 aliguori
            return 0;
1612 a1d1bb31 aliguori
        }
1613 7d03f82f edgar_igl
    }
1614 a1d1bb31 aliguori
    return -ENOENT;
1615 a1d1bb31 aliguori
#else
1616 a1d1bb31 aliguori
    return -ENOSYS;
1617 7d03f82f edgar_igl
#endif
1618 7d03f82f edgar_igl
}
1619 7d03f82f edgar_igl
1620 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    /* drop any translated code that was generated with this
       breakpoint in place */
    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}
1631 a1d1bb31 aliguori
1632 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1633 9349b4f9 Andreas Färber
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
1634 a1d1bb31 aliguori
{
1635 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1636 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1637 a1d1bb31 aliguori
1638 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1639 a1d1bb31 aliguori
        if (bp->flags & mask)
1640 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1641 c0ce998e aliguori
    }
1642 4c3a88a2 bellard
#endif
1643 4c3a88a2 bellard
}
1644 4c3a88a2 bellard
1645 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1646 c33a346e bellard
   CPU loop after each instruction */
1647 9349b4f9 Andreas Färber
void cpu_single_step(CPUArchState *env, int enabled)
1648 c33a346e bellard
{
1649 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1650 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1651 c33a346e bellard
        env->singlestep_enabled = enabled;
1652 e22a25c9 aliguori
        if (kvm_enabled())
1653 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1654 e22a25c9 aliguori
        else {
1655 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1656 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1657 e22a25c9 aliguori
            tb_flush(env);
1658 e22a25c9 aliguori
        }
1659 c33a346e bellard
    }
1660 c33a346e bellard
#endif
1661 c33a346e bellard
}
1662 c33a346e bellard
1663 34865134 bellard
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* lazily open the log file the first time logging is enabled */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* from now on re-opens append instead of truncating */
        log_append = 1;
    }
    /* logging switched off: close the file so it can be reopened later */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1692 34865134 bellard
1693 34865134 bellard
void cpu_set_log_filename(const char *filename)
1694 34865134 bellard
{
1695 34865134 bellard
    logfilename = strdup(filename);
1696 e735b91c pbrook
    if (logfile) {
1697 e735b91c pbrook
        fclose(logfile);
1698 e735b91c pbrook
        logfile = NULL;
1699 e735b91c pbrook
    }
1700 e735b91c pbrook
    cpu_set_log(loglevel);
1701 34865134 bellard
}
1702 c33a346e bellard
1703 9349b4f9 Andreas Färber
/* Unchain the TB the CPU is currently executing (if any) so that the
   execution loop can regain control. */
static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1722 3098dba0 aurel32
1723 97ffbd8d Jan Kiszka
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        /* saturate the instruction-count decrementer so the pending
           interrupt is noticed at the next icount check */
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

/* User mode has no icount/iothread machinery: just record the request
   and break the current TB chain. */
void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */
1762 97ffbd8d Jan Kiszka
1763 9349b4f9 Andreas Färber
/* Clear the given bits in the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1767 b54ad049 bellard
1768 9349b4f9 Andreas Färber
/* Ask the CPU to leave its execution loop: raise exit_request and
   unlink the currently executing TB so the request is seen promptly. */
void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1773 3098dba0 aurel32
1774 c7cd6a37 blueswir1
/* Table of recognised log-mask names: mask bit, option name and help
   text.  Terminated by a zeroed entry; scanned by cpu_str_to_log_mask(). */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1805 f193c797 bellard
1806 f193c797 bellard
/* Return nonzero iff the first n characters of s1 spell exactly the
   NUL-terminated string s2 (no more, no less). */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, n) == 0;
}
1812 3b46e624 ths
1813 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* isolate the next comma-separated token as [p, p1) */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            /* "all" turns on every known mask bit */
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        /* NOTE: when we fall through from the "all" branch, 'item' is
           the zero terminator, so this OR is a harmless no-op there */
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
1845 ea041c0e bellard
1846 9349b4f9 Andreas Färber
/* Report a fatal emulation error: print the formatted message and a CPU
   state dump to stderr (and to the qemu log when enabled), then abort().
   Never returns.  In user mode the default SIGABRT handler is restored
   first so the abort() is not intercepted by guest signal handling. */
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* ap is consumed by vfprintf below; keep a copy for the log path */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1885 7501267e bellard
1886 9349b4f9 Andreas Färber
/* Duplicate a CPU state (used for user-mode clone/fork emulation).
   Allocates a fresh CPUArchState via cpu_init(), copies the whole state
   over it, and restores the new CPU's chaining and index fields. */
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    /* NOTE(review): the QTAILQ_INITs below reset env's lists *before* the
       FOREACH loops iterate them, so the clone loops see empty lists and
       never run — confirm whether this is the intended behavior. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1919 c5be9f08 ths
1920 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1921 0124311e bellard
1922 9349b4f9 Andreas Färber
/* Clear the per-CPU tb_jmp_cache entries that could reference TBs on the
   flushed virtual page.  Two hash buckets are cleared because a TB that
   starts on the previous page may extend into this one. */
static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1936 5c751e99 edgar_igl
1937 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1938 08738984 Igor Kovalenko
    .addr_read  = -1,
1939 08738984 Igor Kovalenko
    .addr_write = -1,
1940 08738984 Igor Kovalenko
    .addr_code  = -1,
1941 08738984 Igor Kovalenko
    .addend     = -1,
1942 08738984 Igor Kovalenko
};
1943 08738984 Igor Kovalenko
1944 771124e1 Peter Maydell
/* NOTE:
1945 771124e1 Peter Maydell
 * If flush_global is true (the usual case), flush all tlb entries.
1946 771124e1 Peter Maydell
 * If flush_global is false, flush (at least) all tlb entries not
1947 771124e1 Peter Maydell
 * marked global.
1948 771124e1 Peter Maydell
 *
1949 771124e1 Peter Maydell
 * Since QEMU doesn't currently implement a global/not-global flag
1950 771124e1 Peter Maydell
 * for tlb entries, at the moment tlb_flush() will also flush all
1951 771124e1 Peter Maydell
 * tlb entries in the flush_global == false case. This is OK because
1952 771124e1 Peter Maydell
 * CPU architectures generally permit an implementation to drop
1953 771124e1 Peter Maydell
 * entries from the TLB at any time, so flushing more entries than
1954 771124e1 Peter Maydell
 * required is only an efficiency issue, not a correctness issue.
1955 771124e1 Peter Maydell
 */
1956 9349b4f9 Andreas Färber
void tlb_flush(CPUArchState *env, int flush_global)
1957 33417e70 bellard
{
1958 33417e70 bellard
    int i;
1959 0124311e bellard
1960 9fa3e853 bellard
#if defined(DEBUG_TLB)
1961 9fa3e853 bellard
    printf("tlb_flush:\n");
1962 9fa3e853 bellard
#endif
1963 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1964 0124311e bellard
       links while we are modifying them */
1965 0124311e bellard
    env->current_tb = NULL;
1966 0124311e bellard
1967 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1968 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1969 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1970 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1971 cfde4bd9 Isaku Yamahata
        }
1972 33417e70 bellard
    }
1973 9fa3e853 bellard
1974 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1975 9fa3e853 bellard
1976 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1977 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1978 e3db7226 bellard
    tlb_flush_count++;
1979 33417e70 bellard
}
1980 33417e70 bellard
1981 274da6b2 bellard
/* Invalidate a single TLB entry if any of its read/write/code addresses
   matches the page-aligned address addr. */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1992 61382a50 bellard
1993 9349b4f9 Andreas Färber
/* Invalidate any TLB entries for the virtual page containing addr, in all
   MMU modes, and drop matching tb_jmp_cache entries.  Falls back to a
   full flush if addr lies inside a recorded large-page region. */
void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
2022 9fa3e853 bellard
2023 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
2024 9fa3e853 bellard
   can be detected */
2025 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
2026 9fa3e853 bellard
{
2027 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
2028 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
2029 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
2030 9fa3e853 bellard
}
2031 9fa3e853 bellard
2032 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
2033 3a7d929e bellard
   tested for self modifying code */
2034 9349b4f9 Andreas Färber
static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
2035 3a7d929e bellard
                                    target_ulong vaddr)
2036 9fa3e853 bellard
{
2037 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2038 1ccde1cb bellard
}
2039 1ccde1cb bellard
2040 7859cc6e Avi Kivity
/* True when the entry maps writable RAM directly (no invalid, MMIO or
   notdirty bits set on the write address). */
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}
2044 7859cc6e Avi Kivity
2045 5fafdf24 ths
/* If the entry maps dirty RAM whose host address falls inside
   [start, start+length), mark it TLB_NOTDIRTY so the next write is
   trapped and re-dirties the page. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr;
    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* unsigned wrap makes this a single-comparison range check */
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2056 1ccde1cb bellard
2057 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
2058 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2059 0a962c02 bellard
                                     int dirty_flags)
2060 1ccde1cb bellard
{
2061 9349b4f9 Andreas Färber
    CPUArchState *env;
2062 8efe0ca8 Stefan Weil
    uintptr_t length, start1;
2063 f7c11b53 Yoshiaki Tamura
    int i;
2064 1ccde1cb bellard
2065 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
2066 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
2067 1ccde1cb bellard
2068 1ccde1cb bellard
    length = end - start;
2069 1ccde1cb bellard
    if (length == 0)
2070 1ccde1cb bellard
        return;
2071 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2072 f23db169 bellard
2073 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
2074 1ccde1cb bellard
       when accessing the range */
2075 8efe0ca8 Stefan Weil
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
2076 a57d23e4 Stefan Weil
    /* Check that we don't span multiple blocks - this breaks the
2077 5579c7f3 pbrook
       address comparisons below.  */
2078 8efe0ca8 Stefan Weil
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
2079 5579c7f3 pbrook
            != (end - 1) - start) {
2080 5579c7f3 pbrook
        abort();
2081 5579c7f3 pbrook
    }
2082 5579c7f3 pbrook
2083 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2084 cfde4bd9 Isaku Yamahata
        int mmu_idx;
2085 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2086 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
2087 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2088 cfde4bd9 Isaku Yamahata
                                      start1, length);
2089 cfde4bd9 Isaku Yamahata
        }
2090 6a00d601 bellard
    }
2091 1ccde1cb bellard
}
2092 1ccde1cb bellard
2093 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2094 74576198 aliguori
{
2095 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2096 74576198 aliguori
    in_migration = enable;
2097 f6f3fbca Michael S. Tsirkin
    return ret;
2098 74576198 aliguori
}
2099 74576198 aliguori
2100 3a7d929e bellard
/* Re-check the dirty state of a single TLB entry: if it maps RAM whose
   page is no longer dirty, set TLB_NOTDIRTY so the next write traps. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if (tlb_is_dirty_ram(tlb_entry)) {
        p = (void *)(uintptr_t)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2114 3a7d929e bellard
2115 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2116 9349b4f9 Andreas Färber
void cpu_tlb_update_dirty(CPUArchState *env)
2117 3a7d929e bellard
{
2118 3a7d929e bellard
    int i;
2119 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2120 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2121 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2122 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2123 cfde4bd9 Isaku Yamahata
    }
2124 3a7d929e bellard
}
2125 3a7d929e bellard
2126 0f459d16 pbrook
/* Drop the TLB_NOTDIRTY marker from an entry that exactly matches vaddr,
   restoring fast (untrapped) writes. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
2131 1ccde1cb bellard
2132 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2133 0f459d16 pbrook
   so that it is no longer dirty */
2134 9349b4f9 Andreas Färber
static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
2135 1ccde1cb bellard
{
2136 1ccde1cb bellard
    int i;
2137 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2138 1ccde1cb bellard
2139 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2140 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2141 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2142 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2143 9fa3e853 bellard
}
2144 9fa3e853 bellard
2145 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
2146 d4c430a8 Paul Brook
   large pages and trigger a full TLB flush if these are invalidated.  */
2147 9349b4f9 Andreas Färber
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
2148 d4c430a8 Paul Brook
                               target_ulong size)
2149 d4c430a8 Paul Brook
{
2150 d4c430a8 Paul Brook
    target_ulong mask = ~(size - 1);
2151 d4c430a8 Paul Brook
2152 d4c430a8 Paul Brook
    if (env->tlb_flush_addr == (target_ulong)-1) {
2153 d4c430a8 Paul Brook
        env->tlb_flush_addr = vaddr & mask;
2154 d4c430a8 Paul Brook
        env->tlb_flush_mask = mask;
2155 d4c430a8 Paul Brook
        return;
2156 d4c430a8 Paul Brook
    }
2157 d4c430a8 Paul Brook
    /* Extend the existing region to include the new page.
2158 d4c430a8 Paul Brook
       This is a compromise between unnecessary flushes and the cost
2159 d4c430a8 Paul Brook
       of maintaining a full variable size TLB.  */
2160 d4c430a8 Paul Brook
    mask &= env->tlb_flush_mask;
2161 d4c430a8 Paul Brook
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2162 d4c430a8 Paul Brook
        mask <<= 1;
2163 d4c430a8 Paul Brook
    }
2164 d4c430a8 Paul Brook
    env->tlb_flush_addr &= mask;
2165 d4c430a8 Paul Brook
    env->tlb_flush_mask = mask;
2166 d4c430a8 Paul Brook
}
2167 d4c430a8 Paul Brook
2168 06ef3525 Avi Kivity
/* True when the section is backed by RAM (includes ROM, which is
   RAM-backed in the memory API). */
static bool is_ram_rom(MemoryRegionSection *s)
{
    return memory_region_is_ram(s->mr);
}
2172 1d393fa2 Avi Kivity
2173 06ef3525 Avi Kivity
/* True for a ROM-device region currently in its directly-readable mode. */
static bool is_romd(MemoryRegionSection *s)
{
    MemoryRegion *mr = s->mr;

    return mr->rom_device && mr->readable;
}
2179 75c578dc Avi Kivity
2180 06ef3525 Avi Kivity
/* True when the section can be read through a direct host pointer:
   RAM, ROM, or a readable ROM device. */
static bool is_ram_rom_romd(MemoryRegionSection *s)
{
    return is_ram_rom(s) || is_romd(s);
}
2184 1d393fa2 Avi Kivity
2185 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
2186 d4c430a8 Paul Brook
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2187 d4c430a8 Paul Brook
   supplied size is only used by tlb_flush_page.  */
2188 9349b4f9 Andreas Färber
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
2189 d4c430a8 Paul Brook
                  target_phys_addr_t paddr, int prot,
2190 d4c430a8 Paul Brook
                  int mmu_idx, target_ulong size)
2191 9fa3e853 bellard
{
2192 f3705d53 Avi Kivity
    MemoryRegionSection *section;
2193 9fa3e853 bellard
    unsigned int index;
2194 4f2ac237 bellard
    target_ulong address;
2195 0f459d16 pbrook
    target_ulong code_address;
2196 8efe0ca8 Stefan Weil
    uintptr_t addend;
2197 84b7b8e7 bellard
    CPUTLBEntry *te;
2198 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2199 c227f099 Anthony Liguori
    target_phys_addr_t iotlb;
2200 9fa3e853 bellard
2201 d4c430a8 Paul Brook
    assert(size >= TARGET_PAGE_SIZE);
2202 d4c430a8 Paul Brook
    if (size != TARGET_PAGE_SIZE) {
2203 d4c430a8 Paul Brook
        tlb_add_large_page(env, vaddr, size);
2204 d4c430a8 Paul Brook
    }
2205 06ef3525 Avi Kivity
    section = phys_page_find(paddr >> TARGET_PAGE_BITS);
2206 9fa3e853 bellard
#if defined(DEBUG_TLB)
2207 7fd3f494 Stefan Weil
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2208 7fd3f494 Stefan Weil
           " prot=%x idx=%d pd=0x%08lx\n",
2209 7fd3f494 Stefan Weil
           vaddr, paddr, prot, mmu_idx, pd);
2210 9fa3e853 bellard
#endif
2211 9fa3e853 bellard
2212 0f459d16 pbrook
    address = vaddr;
2213 f3705d53 Avi Kivity
    if (!is_ram_rom_romd(section)) {
2214 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2215 0f459d16 pbrook
        address |= TLB_MMIO;
2216 0f459d16 pbrook
    }
2217 f3705d53 Avi Kivity
    if (is_ram_rom_romd(section)) {
2218 8efe0ca8 Stefan Weil
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr)
2219 f3705d53 Avi Kivity
                                 + section_addr(section, paddr);
2220 06ef3525 Avi Kivity
    } else {
2221 06ef3525 Avi Kivity
        addend = 0;
2222 06ef3525 Avi Kivity
    }
2223 f3705d53 Avi Kivity
    if (is_ram_rom(section)) {
2224 0f459d16 pbrook
        /* Normal RAM.  */
2225 f3705d53 Avi Kivity
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2226 f3705d53 Avi Kivity
            + section_addr(section, paddr);
2227 f3705d53 Avi Kivity
        if (!section->readonly)
2228 aa102231 Avi Kivity
            iotlb |= phys_section_notdirty;
2229 0f459d16 pbrook
        else
2230 aa102231 Avi Kivity
            iotlb |= phys_section_rom;
2231 0f459d16 pbrook
    } else {
2232 ccbb4d44 Stuart Brady
        /* IO handlers are currently passed a physical address.
2233 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2234 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2235 0f459d16 pbrook
           and avoid full address decoding in every device.
2236 0f459d16 pbrook
           We can't use the high bits of pd for this because
2237 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2238 aa102231 Avi Kivity
        iotlb = section - phys_sections;
2239 f3705d53 Avi Kivity
        iotlb += section_addr(section, paddr);
2240 0f459d16 pbrook
    }
2241 0f459d16 pbrook
2242 0f459d16 pbrook
    code_address = address;
2243 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2244 0f459d16 pbrook
       watchpoint trap routines.  */
2245 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2246 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2247 bf298f83 Jun Koi
            /* Avoid trapping reads of pages with a write breakpoint. */
2248 bf298f83 Jun Koi
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2249 aa102231 Avi Kivity
                iotlb = phys_section_watch + paddr;
2250 bf298f83 Jun Koi
                address |= TLB_MMIO;
2251 bf298f83 Jun Koi
                break;
2252 bf298f83 Jun Koi
            }
2253 6658ffb8 pbrook
        }
2254 0f459d16 pbrook
    }
2255 d79acba4 balrog
2256 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2257 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2258 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2259 0f459d16 pbrook
    te->addend = addend - vaddr;
2260 0f459d16 pbrook
    if (prot & PAGE_READ) {
2261 0f459d16 pbrook
        te->addr_read = address;
2262 0f459d16 pbrook
    } else {
2263 0f459d16 pbrook
        te->addr_read = -1;
2264 0f459d16 pbrook
    }
2265 5c751e99 edgar_igl
2266 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2267 0f459d16 pbrook
        te->addr_code = code_address;
2268 0f459d16 pbrook
    } else {
2269 0f459d16 pbrook
        te->addr_code = -1;
2270 0f459d16 pbrook
    }
2271 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2272 f3705d53 Avi Kivity
        if ((memory_region_is_ram(section->mr) && section->readonly)
2273 f3705d53 Avi Kivity
            || is_romd(section)) {
2274 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2275 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2276 f3705d53 Avi Kivity
        } else if (memory_region_is_ram(section->mr)
2277 06ef3525 Avi Kivity
                   && !cpu_physical_memory_is_dirty(
2278 f3705d53 Avi Kivity
                           section->mr->ram_addr
2279 f3705d53 Avi Kivity
                           + section_addr(section, paddr))) {
2280 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2281 9fa3e853 bellard
        } else {
2282 0f459d16 pbrook
            te->addr_write = address;
2283 9fa3e853 bellard
        }
2284 0f459d16 pbrook
    } else {
2285 0f459d16 pbrook
        te->addr_write = -1;
2286 9fa3e853 bellard
    }
2287 9fa3e853 bellard
}
2288 9fa3e853 bellard
2289 0124311e bellard
#else
2290 0124311e bellard
2291 9349b4f9 Andreas Färber
/* User-mode build: there is no softmmu TLB, so flushing is a no-op. */
void tlb_flush(CPUArchState *env, int flush_global)
{
}
2294 0124311e bellard
2295 9349b4f9 Andreas Färber
/* User-mode build: there is no softmmu TLB, so flushing is a no-op. */
void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}
2298 0124311e bellard
2299 edf8e2af Mika Westerberg
/*
2300 edf8e2af Mika Westerberg
 * Walks guest process memory "regions" one by one
2301 edf8e2af Mika Westerberg
 * and calls callback function 'fn' for each region.
2302 edf8e2af Mika Westerberg
 */
2303 5cd2c5b6 Richard Henderson
2304 5cd2c5b6 Richard Henderson
struct walk_memory_regions_data
2305 5cd2c5b6 Richard Henderson
{
2306 5cd2c5b6 Richard Henderson
    walk_memory_regions_fn fn;
2307 5cd2c5b6 Richard Henderson
    void *priv;
2308 8efe0ca8 Stefan Weil
    uintptr_t start;
2309 5cd2c5b6 Richard Henderson
    int prot;
2310 5cd2c5b6 Richard Henderson
};
2311 5cd2c5b6 Richard Henderson
2312 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2313 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2314 5cd2c5b6 Richard Henderson
{
2315 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2316 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2317 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2318 5cd2c5b6 Richard Henderson
            return rc;
2319 5cd2c5b6 Richard Henderson
        }
2320 5cd2c5b6 Richard Henderson
    }
2321 5cd2c5b6 Richard Henderson
2322 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2323 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2324 5cd2c5b6 Richard Henderson
2325 5cd2c5b6 Richard Henderson
    return 0;
2326 5cd2c5b6 Richard Henderson
}
2327 5cd2c5b6 Richard Henderson
2328 5cd2c5b6 Richard Henderson
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2329 b480d9b7 Paul Brook
                                 abi_ulong base, int level, void **lp)
2330 5cd2c5b6 Richard Henderson
{
2331 b480d9b7 Paul Brook
    abi_ulong pa;
2332 5cd2c5b6 Richard Henderson
    int i, rc;
2333 5cd2c5b6 Richard Henderson
2334 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
2335 5cd2c5b6 Richard Henderson
        return walk_memory_regions_end(data, base, 0);
2336 5cd2c5b6 Richard Henderson
    }
2337 5cd2c5b6 Richard Henderson
2338 5cd2c5b6 Richard Henderson
    if (level == 0) {
2339 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
2340 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2341 5cd2c5b6 Richard Henderson
            int prot = pd[i].flags;
2342 5cd2c5b6 Richard Henderson
2343 5cd2c5b6 Richard Henderson
            pa = base | (i << TARGET_PAGE_BITS);
2344 5cd2c5b6 Richard Henderson
            if (prot != data->prot) {
2345 5cd2c5b6 Richard Henderson
                rc = walk_memory_regions_end(data, pa, prot);
2346 5cd2c5b6 Richard Henderson
                if (rc != 0) {
2347 5cd2c5b6 Richard Henderson
                    return rc;
2348 9fa3e853 bellard
                }
2349 9fa3e853 bellard
            }
2350 5cd2c5b6 Richard Henderson
        }
2351 5cd2c5b6 Richard Henderson
    } else {
2352 5cd2c5b6 Richard Henderson
        void **pp = *lp;
2353 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2354 b480d9b7 Paul Brook
            pa = base | ((abi_ulong)i <<
2355 b480d9b7 Paul Brook
                (TARGET_PAGE_BITS + L2_BITS * level));
2356 5cd2c5b6 Richard Henderson
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2357 5cd2c5b6 Richard Henderson
            if (rc != 0) {
2358 5cd2c5b6 Richard Henderson
                return rc;
2359 5cd2c5b6 Richard Henderson
            }
2360 5cd2c5b6 Richard Henderson
        }
2361 5cd2c5b6 Richard Henderson
    }
2362 5cd2c5b6 Richard Henderson
2363 5cd2c5b6 Richard Henderson
    return 0;
2364 5cd2c5b6 Richard Henderson
}
2365 5cd2c5b6 Richard Henderson
2366 5cd2c5b6 Richard Henderson
/* Walk all guest memory regions, invoking fn(priv, start, end, prot) for
   each maximal run of pages with identical protection.  Stops early and
   returns fn's value if it returns non-zero; otherwise returns 0. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* flush any run still open at the end of the address space */
    return walk_memory_regions_end(&data, 0, 0);
}
2386 edf8e2af Mika Westerberg
2387 b480d9b7 Paul Brook
/* walk_memory_regions callback: print one region as
   "start-end size rwx" to the FILE passed via priv.  Always returns 0
   so the walk continues. */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}
2401 edf8e2af Mika Westerberg
2402 edf8e2af Mika Westerberg
/* dump memory mappings */
2403 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2404 edf8e2af Mika Westerberg
{
2405 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2406 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2407 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2408 33417e70 bellard
}
2409 33417e70 bellard
2410 53a5960a pbrook
/* Return the protection flags of the page containing 'address', or 0 if
   the page has no descriptor (unmapped). */
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
2419 9fa3e853 bellard
2420 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    /* Work on whole pages: round start down, end up. */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    /* Counting down a length instead of comparing addr < end keeps the
       loop correct even if the aligned end wrapped to 0. */
    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2457 33417e70 bellard
2458 3d97b40b ths
/* Check that the guest range [start, start+len) has all of the access
   rights in @flags (PAGE_READ/PAGE_WRITE).  Pages that were made
   read-only because they contain translated code are unprotected on a
   write check.  Returns 0 on success, -1 on failure.  */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* Bug fix: do NOT return 0 here.  The old code returned
               success after checking only the first page, leaving the
               write permission of every subsequent page unchecked and
               unprotected pages still read-only.  Fall through so the
               loop examines the whole range. */
        }
    }
    return 0;
}
2507 3d97b40b ths
2508 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        /* No descriptor: not a fault we can handle. */
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* Host page protection covers a whole host page, which may span
           several (smaller) target pages; restore them all. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            /* Accumulate the union of flags so mprotect() grants the
               most permissive protection needed by any covered page. */
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2555 9fa3e853 bellard
2556 9349b4f9 Andreas Färber
/* User-mode emulation has no softmmu TLB, so dirty tracking per TLB
   entry is a no-op stub here. */
static inline void tlb_set_dirty(CPUArchState *env,
                                 uintptr_t addr, target_ulong vaddr)
{
}
2560 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2561 9fa3e853 bellard
2562 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2563 8da3ff18 pbrook
2564 c04b2b78 Paul Brook
/* Offset of @addr within its target page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* A target page whose contents are split among several memory-region
   sections; sub_section maps each in-page offset to a phys_sections
   index. */
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;        /* page-aligned base address */
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base);
2574 5312bd8b Avi Kivity
/* Release the subpage (if any) backing phys_sections[section_index].
   Plain sections own no per-entry storage and need no cleanup. */
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegion *region = phys_sections[section_index].mr;
    subpage_t *sp;

    if (!region->subpage) {
        return;
    }
    sp = container_of(region, subpage_t, iomem);
    memory_region_destroy(&sp->iomem);
    g_free(sp);
}
}
2585 54688b1e Avi Kivity
2586 4346ae3e Avi Kivity
/* Recursively tear down the physical-page map subtree rooted at @lp,
   destroying leaf page descriptors, then reset @lp to an empty entry. */
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    PhysPageEntry *node;
    unsigned idx;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;                         /* empty subtree */
    }

    node = phys_map_nodes[lp->ptr];
    for (idx = 0; idx < L2_SIZE; ++idx) {
        if (node[idx].is_leaf) {
            destroy_page_desc(node[idx].ptr);
        } else {
            destroy_l2_mapping(&node[idx], level - 1);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}
2606 54688b1e Avi Kivity
2607 54688b1e Avi Kivity
/* Drop the entire physical-page map: free every subtree from the root,
   then release the node pool for reuse. */
static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}
2612 54688b1e Avi Kivity
2613 5312bd8b Avi Kivity
/* Append a copy of @section to the global phys_sections table, growing
   the table geometrically on demand, and return its index. */
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    uint16_t idx = phys_sections_nb;

    if (phys_sections_nb == phys_sections_nb_alloc) {
        /* Double capacity (minimum 16 entries). */
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[idx] = *section;
    phys_sections_nb = idx + 1;
    return idx;
}
2623 5312bd8b Avi Kivity
2624 5312bd8b Avi Kivity
/* Forget all registered sections.  The backing array is kept and
   reused by the next phys_section_add(). */
static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
2628 5312bd8b Avi Kivity
2629 8f2498f9 Michael S. Tsirkin
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
/* Map a section smaller than / unaligned to a target page by routing it
   through a subpage_t covering the enclosing page. */
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    /* Section describing the whole-page subpage container. */
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    /* The page must currently be either unassigned or already a subpage;
       overlaying a plain mapping is not supported. */
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        /* First sub-page mapping for this page: create the container
           and point the page at it. */
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    /* Register the in-page byte range [start, end) for this section. */
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size;
    subpage_register(subpage, start, end, phys_section_add(section));
}
2663 0f0cb164 Avi Kivity
2664 0f0cb164 Avi Kivity
2665 0f0cb164 Avi Kivity
/* Map a page-aligned, page-multiple section directly into the physical
   page table as a run of whole pages. */
static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start = section->offset_within_address_space;
    ram_addr_t size = section->size;
    uint16_t idx = phys_section_add(section);

    assert(size);

    phys_page_set(start >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS, idx);
}
2678 33417e70 bellard
2679 0f0cb164 Avi Kivity
/* Register @section in the physical page table, splitting it into up to
   three parts: an unaligned head (subpage), a page-aligned middle
   (multipage), and an unaligned tail (subpage).  @readonly is currently
   unused here.  */
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    /* Head: up to the next page boundary, if start is unaligned or the
       whole section is smaller than a page. */
    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    /* Middle: whole pages, registered directly. */
    now = remain;
    now.size &= TARGET_PAGE_MASK;
    if (now.size) {
        register_multipage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    /* Tail: any sub-page remainder. */
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}
2707 0f0cb164 Avi Kivity
2708 0f0cb164 Avi Kivity
2709 c227f099 Anthony Liguori
/* Ask KVM to coalesce MMIO writes for [addr, addr+size); no-op when
   KVM is not in use. */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_coalesce_mmio_region(addr, size);
    }
}
2714 f65ed4c1 aliguori
2715 c227f099 Anthony Liguori
/* Undo qemu_register_coalesced_mmio() for [addr, addr+size); no-op
   when KVM is not in use. */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_uncoalesce_mmio_region(addr, size);
    }
}
2720 f65ed4c1 aliguori
2721 62a2744c Sheng Yang
/* Drain KVM's buffered coalesced-MMIO ring so pending writes become
   visible; no-op when KVM is not in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
2726 62a2744c Sheng Yang
2727 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2728 c902760f Marcelo Tosatti
2729 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2730 c902760f Marcelo Tosatti
2731 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
2732 c902760f Marcelo Tosatti
2733 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
2734 c902760f Marcelo Tosatti
{
2735 c902760f Marcelo Tosatti
    struct statfs fs;
2736 c902760f Marcelo Tosatti
    int ret;
2737 c902760f Marcelo Tosatti
2738 c902760f Marcelo Tosatti
    do {
2739 9742bf26 Yoshiaki Tamura
        ret = statfs(path, &fs);
2740 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);
2741 c902760f Marcelo Tosatti
2742 c902760f Marcelo Tosatti
    if (ret != 0) {
2743 9742bf26 Yoshiaki Tamura
        perror(path);
2744 9742bf26 Yoshiaki Tamura
        return 0;
2745 c902760f Marcelo Tosatti
    }
2746 c902760f Marcelo Tosatti
2747 c902760f Marcelo Tosatti
    if (fs.f_type != HUGETLBFS_MAGIC)
2748 9742bf26 Yoshiaki Tamura
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2749 c902760f Marcelo Tosatti
2750 c902760f Marcelo Tosatti
    return fs.f_bsize;
2751 c902760f Marcelo Tosatti
}
2752 c902760f Marcelo Tosatti
2753 04b16653 Alex Williamson
/* Allocate @memory bytes of guest RAM backed by a (huge-page) file
   under @path.  On success, stores the backing fd in block->fd and
   returns the mapped area; returns NULL on any failure so the caller
   can fall back to anonymous memory. */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* A region smaller than one huge page gains nothing here. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the mapping keeps the file alive, and it is
       cleaned up automatically when the fd is closed. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
2821 c902760f Marcelo Tosatti
#endif
2822 c902760f Marcelo Tosatti
2823 d17b5288 Alex Williamson
/* Find an offset in the ram_addr_t space for a new block of @size
   bytes, using a best-fit search over the gaps between existing
   RAM blocks.  Aborts if no gap is large enough. */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    /* For each block, find the distance to the nearest block that
       starts at or after its end; that distance is the gap size. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        /* Keep the smallest gap that still fits (best fit). */
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
2855 04b16653 Alex Williamson
2856 04b16653 Alex Williamson
static ram_addr_t last_ram_offset(void)
2857 04b16653 Alex Williamson
{
2858 d17b5288 Alex Williamson
    RAMBlock *block;
2859 d17b5288 Alex Williamson
    ram_addr_t last = 0;
2860 d17b5288 Alex Williamson
2861 d17b5288 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next)
2862 d17b5288 Alex Williamson
        last = MAX(last, block->offset + block->length);
2863 d17b5288 Alex Williamson
2864 d17b5288 Alex Williamson
    return last;
2865 d17b5288 Alex Williamson
}
2866 d17b5288 Alex Williamson
2867 c5705a77 Avi Kivity
/* Assign the migration id string of the RAM block at offset @addr,
   built from the owning device's bus path (if available) plus @name.
   Aborts on an unknown offset, an already-named block, or a duplicate
   id string. */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    /* Locate the block by its exact start offset. */
    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    /* Each block may be named only once. */
    assert(!new_block->idstr[0]);

    /* Prefix with the device path ("<path>/") for uniqueness across
       multiple instances of the same device type. */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Id strings must be unique; migration keys RAM by them. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
2898 c5705a77 Avi Kivity
2899 c5705a77 Avi Kivity
/* Register a new RAM block of @size bytes for MemoryRegion @mr and
   return its ram_addr_t offset.  If @host is non-NULL the caller's
   memory is used (RAM_PREALLOC_MASK); otherwise memory is allocated
   from -mem-path, an s390x fixed mapping, Xen, or qemu_vmalloc(),
   depending on configuration.  All pages start flagged dirty. */
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        /* Caller supplied the backing memory; we must not free it. */
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* Fall back to anonymous memory if file backing failed. */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               an system defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                /* Xen manages the backing memory itself. */
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the new range fully dirty. */
    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2962 e9a1ab19 bellard
2963 c5705a77 Avi Kivity
/* Allocate a new RAM block of @size bytes for @mr, letting QEMU choose
   and allocate the host backing memory. */
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
2967 6977dfe6 Yoshiaki Tamura
2968 1f2e98b6 Alex Williamson
/* Unregister the RAM block starting at @addr without freeing its host
   memory (the caller supplied and still owns that memory).  Silently
   does nothing if no block starts at @addr. */
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *rb;

    QLIST_FOREACH(rb, &ram_list.blocks, next) {
        if (rb->offset != addr) {
            continue;
        }
        QLIST_REMOVE(rb, next);
        g_free(rb);
        return;
    }
}
2980 1f2e98b6 Alex Williamson
2981 c227f099 Anthony Liguori
/* Unregister the RAM block starting at @addr and release its host
   memory with the mechanism matching how it was allocated (preallocated
   by caller / -mem-path file / s390x mmap / Xen / qemu_vmalloc).
   Silently does nothing if no block starts at @addr. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Caller owns the memory; nothing to free here. */
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    /* File-backed (hugetlbfs) allocation. */
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    /* file_ram_alloc() failed and we fell back. */
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}
3018 e9a1ab19 bellard
3019 cd19cfa2 Huang Ying
#ifndef _WIN32
3020 cd19cfa2 Huang Ying
/* Re-establish the host mapping of guest RAM in [addr, addr+length),
   e.g. after the old pages became unusable.  The range is re-mmap()ed
   at the same virtual address (MAP_FIXED) with the same backing
   strategy the block was created with.  Exits on failure, since the
   guest cannot continue without its RAM. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Caller-provided memory: not ours to remap. */
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        /* Prefault the new pages again if the user asked
                           for preallocated memory. */
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    /* -mem-path is unsupported on this host/target. */
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    /* MAP_FIXED must return exactly vaddr; anything else
                       means the remap failed. */
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
3078 cd19cfa2 Huang Ying
#endif /* !_WIN32 */
3079 cd19cfa2 Huang Ying
3080 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list, so frequently
               used blocks are found faster on later lookups.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    /* No block contains addr: this is a caller bug. */
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3120 dc828ca1 pbrook
3121 b2e0a138 Michael S. Tsirkin
/* Return a host pointer to ram allocated with qemu_ram_alloc.
3122 b2e0a138 Michael S. Tsirkin
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3123 b2e0a138 Michael S. Tsirkin
 */
3124 b2e0a138 Michael S. Tsirkin
void *qemu_safe_ram_ptr(ram_addr_t addr)
3125 b2e0a138 Michael S. Tsirkin
{
3126 b2e0a138 Michael S. Tsirkin
    RAMBlock *block;
3127 b2e0a138 Michael S. Tsirkin
3128 b2e0a138 Michael S. Tsirkin
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3129 b2e0a138 Michael S. Tsirkin
        if (addr - block->offset < block->length) {
3130 868bb33f Jan Kiszka
            if (xen_enabled()) {
3131 432d268c Jun Nakajima
                /* We need to check if the requested address is in the RAM
3132 432d268c Jun Nakajima
                 * because we don't want to map the entire memory in QEMU.
3133 712c2b41 Stefano Stabellini
                 * In that case just map until the end of the page.
3134 432d268c Jun Nakajima
                 */
3135 432d268c Jun Nakajima
                if (block->offset == 0) {
3136 e41d7c69 Jan Kiszka
                    return xen_map_cache(addr, 0, 0);
3137 432d268c Jun Nakajima
                } else if (block->host == NULL) {
3138 e41d7c69 Jan Kiszka
                    block->host =
3139 e41d7c69 Jan Kiszka
                        xen_map_cache(block->offset, block->length, 1);
3140 432d268c Jun Nakajima
                }
3141 432d268c Jun Nakajima
            }
3142 b2e0a138 Michael S. Tsirkin
            return block->host + (addr - block->offset);
3143 b2e0a138 Michael S. Tsirkin
        }
3144 b2e0a138 Michael S. Tsirkin
    }
3145 b2e0a138 Michael S. Tsirkin
3146 b2e0a138 Michael S. Tsirkin
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3147 b2e0a138 Michael S. Tsirkin
    abort();
3148 b2e0a138 Michael S. Tsirkin
3149 b2e0a138 Michael S. Tsirkin
    return NULL;
3150 b2e0a138 Michael S. Tsirkin
}
3151 b2e0a138 Michael S. Tsirkin
3152 38bee5dc Stefano Stabellini
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3153 38bee5dc Stefano Stabellini
 * but takes a size argument */
3154 8ab934f9 Stefano Stabellini
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3155 38bee5dc Stefano Stabellini
{
3156 8ab934f9 Stefano Stabellini
    if (*size == 0) {
3157 8ab934f9 Stefano Stabellini
        return NULL;
3158 8ab934f9 Stefano Stabellini
    }
3159 868bb33f Jan Kiszka
    if (xen_enabled()) {
3160 e41d7c69 Jan Kiszka
        return xen_map_cache(addr, *size, 1);
3161 868bb33f Jan Kiszka
    } else {
3162 38bee5dc Stefano Stabellini
        RAMBlock *block;
3163 38bee5dc Stefano Stabellini
3164 38bee5dc Stefano Stabellini
        QLIST_FOREACH(block, &ram_list.blocks, next) {
3165 38bee5dc Stefano Stabellini
            if (addr - block->offset < block->length) {
3166 38bee5dc Stefano Stabellini
                if (addr - block->offset + *size > block->length)
3167 38bee5dc Stefano Stabellini
                    *size = block->length - addr + block->offset;
3168 38bee5dc Stefano Stabellini
                return block->host + (addr - block->offset);
3169 38bee5dc Stefano Stabellini
            }
3170 38bee5dc Stefano Stabellini
        }
3171 38bee5dc Stefano Stabellini
3172 38bee5dc Stefano Stabellini
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3173 38bee5dc Stefano Stabellini
        abort();
3174 38bee5dc Stefano Stabellini
    }
3175 38bee5dc Stefano Stabellini
}
3176 38bee5dc Stefano Stabellini
3177 050a0ddf Anthony PERARD
/* Counterpart of qemu_get_ram_ptr() and friends; currently only emits
   a tracepoint for the released pointer. */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
3181 050a0ddf Anthony PERARD
3182 e890261f Marcelo Tosatti
/* Translate a host pointer inside guest RAM back to a ram_addr_t.
   Returns 0 on success with *ram_addr filled in, or -1 if the pointer
   lies in no RAMBlock. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This happens when the block is not mapped into QEMU. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
3205 f471a17e Alex Williamson
3206 e890261f Marcelo Tosatti
/* Some of the softmmu routines need to translate from a host pointer
3207 e890261f Marcelo Tosatti
   (typically a TLB entry) back to a ram offset.  */
3208 e890261f Marcelo Tosatti
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3209 e890261f Marcelo Tosatti
{
3210 e890261f Marcelo Tosatti
    ram_addr_t ram_addr;
3211 f471a17e Alex Williamson
3212 e890261f Marcelo Tosatti
    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3213 e890261f Marcelo Tosatti
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
3214 e890261f Marcelo Tosatti
        abort();
3215 e890261f Marcelo Tosatti
    }
3216 e890261f Marcelo Tosatti
    return ram_addr;
3217 5579c7f3 pbrook
}
3218 5579c7f3 pbrook
3219 0e0df1e2 Avi Kivity
/* Read handler for accesses that hit no registered memory region.
   Some targets report the access to the CPU; the read value is 0. */
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

/* Write handler for unassigned memory: the value is discarded after
   optionally notifying the CPU. */
static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
3247 e18231a3 blueswir1
3248 0e0df1e2 Avi Kivity
/* Handlers for accesses that should never reach an I/O callback (RAM
   and ROM reads go through the fast path); reaching them is a bug. */
static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* ROM: reads never reach .read (fast path); writes are silently
   discarded, like writes to unassigned memory. */
static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
3271 33417e70 bellard
3272 0e0df1e2 Avi Kivity
/* Write handler for RAM pages whose dirty state is being tracked:
   invalidate any translated code on the page, perform the store into
   guest RAM, mark the page dirty, and drop this slow path once the
   page no longer holds translated code. */
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Page contains translated code: invalidate it before storing,
           and re-read the flags the invalidation may have changed. */
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,  /* reads always take the fast path */
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
3309 1ccde1cb bellard
3310 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Hit if the access overlaps the watchpoint range and the
           access type (read/write) matches the watchpoint's flags. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Roll the CPU back to the faulting instruction and
                   invalidate the TB so execution can stop precisely. */
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    /* Stop after the access: retranslate a single
                       instruction and resume. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3355 0f459d16 pbrook
3356 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
3394 6658ffb8 pbrook
3395 70c68e44 Avi Kivity
/* Dispatch a read that hit a page split into sub-page regions: look up
   the MemoryRegionSection registered for this offset, rebase the
   address into that region, and forward the access. */
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

/* Write counterpart of subpage_read(): same lookup and rebasing, then
   forward the store to the target region. */
static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
3437 db7b5426 blueswir1
3438 de712f94 Avi Kivity
static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3439 de712f94 Avi Kivity
                                 unsigned size)
3440 56384e8b Andreas Färber
{
3441 56384e8b Andreas Färber
    ram_addr_t raddr = addr;
3442 56384e8b Andreas Färber
    void *ptr = qemu_get_ram_ptr(raddr);
3443 de712f94 Avi Kivity
    switch (size) {
3444 de712f94 Avi Kivity
    case 1: return ldub_p(ptr);
3445 de712f94 Avi Kivity
    case 2: return lduw_p(ptr);
3446 de712f94 Avi Kivity
    case 4: return ldl_p(ptr);
3447 de712f94 Avi Kivity
    default: abort();
3448 de712f94 Avi Kivity
    }
3449 56384e8b Andreas Färber
}
3450 56384e8b Andreas Färber
3451 de712f94 Avi Kivity
static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3452 de712f94 Avi Kivity
                              uint64_t value, unsigned size)
3453 56384e8b Andreas Färber
{
3454 56384e8b Andreas Färber
    ram_addr_t raddr = addr;
3455 56384e8b Andreas Färber
    void *ptr = qemu_get_ram_ptr(raddr);
3456 de712f94 Avi Kivity
    switch (size) {
3457 de712f94 Avi Kivity
    case 1: return stb_p(ptr, value);
3458 de712f94 Avi Kivity
    case 2: return stw_p(ptr, value);
3459 de712f94 Avi Kivity
    case 4: return stl_p(ptr, value);
3460 de712f94 Avi Kivity
    default: abort();
3461 de712f94 Avi Kivity
    }
3462 56384e8b Andreas Färber
}
3463 56384e8b Andreas Färber
3464 de712f94 Avi Kivity
static const MemoryRegionOps subpage_ram_ops = {
3465 de712f94 Avi Kivity
    .read = subpage_ram_read,
3466 de712f94 Avi Kivity
    .write = subpage_ram_write,
3467 de712f94 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
3468 56384e8b Andreas Färber
};
3469 56384e8b Andreas Färber
3470 c227f099 Anthony Liguori
/* Map the byte range [start, end] of a subpage to the given
   phys_sections index.  Returns 0 on success, -1 if the range falls
   outside the page.  If the target section is RAM it is rerouted
   through io_mem_subpage_ram, since the generic RAM fast path cannot
   handle sub-page granularity. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    /* Fixed: the old message referenced 'memory', a variable that no
       longer exists, so DEBUG_SUBPAGE builds failed to compile. */
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
3494 db7b5426 blueswir1
3495 0f0cb164 Avi Kivity
/* Allocate and initialize a subpage container for the page at @base.
   All slots initially point at the unassigned section; callers then
   fill in the real sub-ranges via subpage_register(). */
static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    /* Fixed: the old message referenced 'subpage_memory', a variable
       that no longer exists, breaking DEBUG_SUBPAGE builds. */
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
3513 db7b5426 blueswir1
3514 5312bd8b Avi Kivity
/* Register a MemoryRegionSection that spans the whole address space
   with the given region and return its phys_sections index. */
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}
3525 5312bd8b Avi Kivity
3526 37ec01d4 Avi Kivity
/* Map an IO-TLB entry back to its MemoryRegion; the sub-page bits of
   the index carry the phys_sections slot number. */
MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
3530 aa102231 Avi Kivity
3531 e9179ce1 Avi Kivity
/* Create the fixed internal I/O regions: RAM/ROM markers (whose reads
   must never reach a callback), unassigned, dirty-tracking, sub-page
   RAM and watchpoint handling. */
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
3544 e9179ce1 Avi Kivity
3545 50c1e149 Avi Kivity
/* Start of a memory topology update: drop all existing mappings and
   section bookkeeping, then re-create the fixed sections. */
static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}
3555 50c1e149 Avi Kivity
3556 50c1e149 Avi Kivity
/* End of a memory topology update: flush every CPU's TLB. */
static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
3567 50c1e149 Avi Kivity
3568 93632747 Avi Kivity
/* Enter a newly added section into the physical memory map; readonly
 * sections are registered with the readonly flag set.  */
static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}
3573 93632747 Avi Kivity
3574 93632747 Avi Kivity
/* No-op: the entire map is destroyed and rebuilt on each update (see
 * core_begin), so individual deletions need no handling.  */
static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}
3578 93632747 Avi Kivity
3579 50c1e149 Avi Kivity
/* Sections unchanged by the update must still be re-registered, since
 * core_begin cleared the whole physical map.  */
static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}
3584 50c1e149 Avi Kivity
3585 93632747 Avi Kivity
/* Per-section dirty-logging callbacks: nothing to do in the core
 * listener; only the global start/stop hooks have an effect here.  */
static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}
3599 93632747 Avi Kivity
3600 93632747 Avi Kivity
/* Global dirty logging was requested: turn on dirty memory tracking.  */
static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}
3604 93632747 Avi Kivity
3605 93632747 Avi Kivity
/* Global dirty logging ended: turn dirty memory tracking back off.  */
static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}
3609 93632747 Avi Kivity
3610 93632747 Avi Kivity
/* ioeventfd notifications are not consumed by the core listener.  */
static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}
3621 93632747 Avi Kivity
3622 50c1e149 Avi Kivity
/* The I/O listener needs no transaction bracketing: each section is
 * registered and unregistered individually by io_region_add/del.  */
static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}
3629 50c1e149 Avi Kivity
3630 4855d41a Avi Kivity
static void io_region_add(MemoryListener *listener,
3631 4855d41a Avi Kivity
                          MemoryRegionSection *section)
3632 4855d41a Avi Kivity
{
3633 a2d33521 Avi Kivity
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3634 a2d33521 Avi Kivity
3635 a2d33521 Avi Kivity
    mrio->mr = section->mr;
3636 a2d33521 Avi Kivity
    mrio->offset = section->offset_within_region;
3637 a2d33521 Avi Kivity
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
3638 4855d41a Avi Kivity
                 section->offset_within_address_space, section->size);
3639 a2d33521 Avi Kivity
    ioport_register(&mrio->iorange);
3640 4855d41a Avi Kivity
}
3641 4855d41a Avi Kivity
3642 4855d41a Avi Kivity
/* Remove a section of the I/O address space from the legacy ioport
 * tables.  */
static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}
3647 4855d41a Avi Kivity
3648 50c1e149 Avi Kivity
/* Unchanged I/O sections keep their existing ioport registration.  */
static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}
3652 50c1e149 Avi Kivity
3653 4855d41a Avi Kivity
/* Dirty logging has no meaning for the I/O address space: all of the
 * logging callbacks are no-ops.  */
static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}
3675 4855d41a Avi Kivity
3676 4855d41a Avi Kivity
/* ioeventfd notifications are not consumed by the I/O listener.  */
static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}
3687 4855d41a Avi Kivity
3688 93632747 Avi Kivity
/* Listener attached to the system address space: keeps the physical
 * page map (phys_map) in sync with the flattened memory topology and
 * forwards global dirty-logging requests to the dirty tracking code.  */
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};
3703 93632747 Avi Kivity
3704 4855d41a Avi Kivity
/* Listener attached to the I/O address space: mirrors sections into
 * the legacy ioport tables; logging and eventfd hooks are no-ops.  */
static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};
3719 4855d41a Avi Kivity
3720 62152b8a Avi Kivity
/* Create the top-level "system" (memory) and "io" (port) address
 * spaces, install them as the global maps, and attach the listeners
 * that keep the flattened views in sync.  */
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    /* the I/O space covers the classic 64K port range */
    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}
3733 62152b8a Avi Kivity
3734 62152b8a Avi Kivity
/* Return the root MemoryRegion of the system address space, created in
 * memory_map_init().  */
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}
3738 62152b8a Avi Kivity
3739 309cb471 Avi Kivity
/* Return the root MemoryRegion of the I/O address space, created in
 * memory_map_init().  */
MemoryRegion *get_system_io(void)
{
    return system_io;
}
3743 309cb471 Avi Kivity
3744 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3745 e2eef170 pbrook
3746 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3747 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3748 9349b4f9 Andreas Färber
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
3749 a68fe89c Paul Brook
                        uint8_t *buf, int len, int is_write)
3750 13eb76e0 bellard
{
3751 13eb76e0 bellard
    int l, flags;
3752 13eb76e0 bellard
    target_ulong page;
3753 53a5960a pbrook
    void * p;
3754 13eb76e0 bellard
3755 13eb76e0 bellard
    while (len > 0) {
3756 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3757 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3758 13eb76e0 bellard
        if (l > len)
3759 13eb76e0 bellard
            l = len;
3760 13eb76e0 bellard
        flags = page_get_flags(page);
3761 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
3762 a68fe89c Paul Brook
            return -1;
3763 13eb76e0 bellard
        if (is_write) {
3764 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3765 a68fe89c Paul Brook
                return -1;
3766 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3767 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3768 a68fe89c Paul Brook
                return -1;
3769 72fb7daa aurel32
            memcpy(p, buf, l);
3770 72fb7daa aurel32
            unlock_user(p, addr, l);
3771 13eb76e0 bellard
        } else {
3772 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3773 a68fe89c Paul Brook
                return -1;
3774 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3775 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3776 a68fe89c Paul Brook
                return -1;
3777 72fb7daa aurel32
            memcpy(buf, p, l);
3778 5b257578 aurel32
            unlock_user(p, addr, 0);
3779 13eb76e0 bellard
        }
3780 13eb76e0 bellard
        len -= l;
3781 13eb76e0 bellard
        buf += l;
3782 13eb76e0 bellard
        addr += l;
3783 13eb76e0 bellard
    }
3784 a68fe89c Paul Brook
    return 0;
3785 13eb76e0 bellard
}
3786 8df1cd07 bellard
3787 13eb76e0 bellard
#else
3788 c227f099 Anthony Liguori
/* Copy len bytes between buf and guest physical address addr.
 * The transfer is split at page boundaries; each page is looked up in
 * the physical memory map and dispatched either to RAM (direct memcpy)
 * or to a device (io_mem_read/io_mem_write at the widest naturally
 * aligned width: 4, 2 or 1 bytes).  RAM writes invalidate any
 * translated code covering the range and mark the pages dirty; writes
 * to read-only RAM-backed sections are silently discarded.  */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;   /* bytes left in this page */
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                /* I/O case: issue the widest aligned access possible */
                target_phys_addr_t addr1;
                addr1 = section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
            /* else: write to read-only RAM is dropped */
        } else {
            if (!is_ram_rom_romd(section)) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + section_addr(section, addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3876 8df1cd07 bellard
3877 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
3878 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3879 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3880 d0ecd2aa bellard
{
3881 d0ecd2aa bellard
    int l;
3882 d0ecd2aa bellard
    uint8_t *ptr;
3883 c227f099 Anthony Liguori
    target_phys_addr_t page;
3884 f3705d53 Avi Kivity
    MemoryRegionSection *section;
3885 3b46e624 ths
3886 d0ecd2aa bellard
    while (len > 0) {
3887 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3888 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3889 d0ecd2aa bellard
        if (l > len)
3890 d0ecd2aa bellard
            l = len;
3891 06ef3525 Avi Kivity
        section = phys_page_find(page >> TARGET_PAGE_BITS);
3892 3b46e624 ths
3893 f3705d53 Avi Kivity
        if (!is_ram_rom_romd(section)) {
3894 d0ecd2aa bellard
            /* do nothing */
3895 d0ecd2aa bellard
        } else {
3896 d0ecd2aa bellard
            unsigned long addr1;
3897 f3705d53 Avi Kivity
            addr1 = memory_region_get_ram_addr(section->mr)
3898 f3705d53 Avi Kivity
                + section_addr(section, addr);
3899 d0ecd2aa bellard
            /* ROM/RAM case */
3900 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3901 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3902 050a0ddf Anthony PERARD
            qemu_put_ram_ptr(ptr);
3903 d0ecd2aa bellard
        }
3904 d0ecd2aa bellard
        len -= l;
3905 d0ecd2aa bellard
        buf += l;
3906 d0ecd2aa bellard
        addr += l;
3907 d0ecd2aa bellard
    }
3908 d0ecd2aa bellard
}
3909 d0ecd2aa bellard
3910 6d16c2f8 aliguori
/* Bounce buffer used by cpu_physical_memory_map() when the target is
 * not directly mappable RAM.  There is a single global instance, so at
 * most one bounced mapping can exist at a time.  */
typedef struct {
    void *buffer;               /* temporary host buffer; NULL when idle */
    target_phys_addr_t addr;    /* guest physical address being mapped */
    target_phys_addr_t len;     /* length of the bounced mapping */
} BounceBuffer;

static BounceBuffer bounce;
3917 6d16c2f8 aliguori
3918 ba223c29 aliguori
/* Callers waiting for the bounce buffer to become free register a
 * MapClient; cpu_notify_map_clients() invokes and removes them all
 * when the buffer is released.  */
typedef struct MapClient {
    void *opaque;                   /* argument handed back to callback */
    void (*callback)(void *opaque); /* run from cpu_notify_map_clients() */
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3926 ba223c29 aliguori
3927 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3928 ba223c29 aliguori
{
3929 7267c094 Anthony Liguori
    MapClient *client = g_malloc(sizeof(*client));
3930 ba223c29 aliguori
3931 ba223c29 aliguori
    client->opaque = opaque;
3932 ba223c29 aliguori
    client->callback = callback;
3933 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3934 ba223c29 aliguori
    return client;
3935 ba223c29 aliguori
}
3936 ba223c29 aliguori
3937 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3938 ba223c29 aliguori
{
3939 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3940 ba223c29 aliguori
3941 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3942 7267c094 Anthony Liguori
    g_free(client);
3943 ba223c29 aliguori
}
3944 ba223c29 aliguori
3945 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3946 ba223c29 aliguori
{
3947 ba223c29 aliguori
    MapClient *client;
3948 ba223c29 aliguori
3949 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3950 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3951 ba223c29 aliguori
        client->callback(client->opaque);
3952 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3953 ba223c29 aliguori
    }
3954 ba223c29 aliguori
}
3955 ba223c29 aliguori
3956 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;     /* contiguous directly-mappable bytes */
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX; /* ram address of the first page */
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        /* Anything that is not writable RAM goes through the single
         * global bounce buffer: at most one page, one user at a time. */
        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                /* either we already mapped some RAM, or the bounce
                   buffer is busy: stop here with what we have */
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* reads are satisfied up front from guest memory */
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            /* remember the ram address of the first mappable page */
            raddr = memory_region_get_ram_addr(section->mr)
                + section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    /* NOTE(review): qemu_ram_ptr_length() may shorten rlen further —
       *plen reports what was actually mapped; confirm against its
       implementation.  */
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
4011 6d16c2f8 aliguori
4012 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: walk the written range page by page,
         * invalidating translated code and setting dirty flags. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    /* Bounce-buffer case: flush written data back to guest memory,
     * release the buffer and wake any waiting map clients. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
4050 d0ecd2aa bellard
4051 8df1cd07 bellard
/* warning: addr must be aligned */
/* Load a 32-bit value from guest physical memory with the requested
 * device endianness.  I/O regions go through io_mem_read() with a byte
 * swap when device and target endianness differ; RAM/ROM is read
 * directly through the host pointer.  */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            /* target-native endianness */
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}
4093 8df1cd07 bellard
4094 1e78bcc1 Alexander Graf
/* Endianness-explicit entry points for 32-bit physical loads.  */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
4108 1e78bcc1 Alexander Graf
4109 84b7b8e7 bellard
/* warning: addr must be aligned */
/* Load a 64-bit value from guest physical memory.  For I/O regions the
 * access is split into two 32-bit io_mem_read() calls combined in
 * target byte order.  */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            /* target-native endianness */
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}
4151 84b7b8e7 bellard
4152 1e78bcc1 Alexander Graf
/* Endianness-explicit entry points for 64-bit physical loads.  */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
4166 1e78bcc1 Alexander Graf
4167 aab33094 bellard
/* XXX: optimize */
/* Load a single byte from guest physical memory (no endianness or
 * alignment concerns for one byte).  */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
4174 aab33094 bellard
4175 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
4176 1e78bcc1 Alexander Graf
/* Load a 16-bit value from guest physical address ADDR, byte-swapped as
 * requested by ENDIAN.  warning: addr must be aligned.
 *
 * Non-RAM/ROM regions go through the memory-region I/O callbacks; the
 * result is byte-swapped when the requested endianness differs from the
 * target's native order.  RAM/ROM/ROM-device regions are read directly
 * from the host mapping with the matching lduw_*_p accessor. */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    /* Look up the memory-region section covering this physical page. */
    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
        /* io_mem_read returns target-native order; swap if the caller
         * asked for the opposite endianness. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case: compute the host pointer for the guest address and
         * read with the accessor matching the requested endianness. */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}
4217 aab33094 bellard
4218 1e78bcc1 Alexander Graf
/* Load a 16-bit value from guest physical memory in guest-native byte order. */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint32_t value = lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
    return value;
}
4222 1e78bcc1 Alexander Graf
4223 1e78bcc1 Alexander Graf
/* Load a 16-bit value from guest physical memory, little-endian. */
uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    uint32_t value = lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
    return value;
}
4227 1e78bcc1 Alexander Graf
4228 1e78bcc1 Alexander Graf
/* Load a 16-bit value from guest physical memory, big-endian. */
uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    uint32_t value = lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
    return value;
}
4232 1e78bcc1 Alexander Graf
4233 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
4234 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
4235 8df1cd07 bellard
   bits are used to track modified PTEs */
4236 c227f099 Anthony Liguori
/* Store a 32-bit value at guest physical ADDR without marking the RAM page
 * dirty and without invalidating translated code for it, except during
 * migration.  warning: addr must be aligned.  Useful when the dirty bits
 * are used to track modified guest PTEs. */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        /* Read-only RAM is redirected to the ROM handler so the write is
         * discarded/handled rather than hitting the RAM directly. */
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration we must still record the page as dirty (and
         * invalidate any TBs) so the destination sees the update. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4267 8df1cd07 bellard
4268 c227f099 Anthony Liguori
/* 64-bit variant of stl_phys_notdirty: store VAL at guest physical ADDR
 * without dirty tracking or code invalidation.  I/O regions only support
 * 32-bit accesses here, so the value is written as two 4-byte halves in
 * target byte order. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        /* Redirect writes to read-only RAM through the ROM handler. */
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        /* Split the 64-bit store into two 32-bit I/O writes, high or low
         * half first depending on target endianness. */
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        stq_p(ptr, val);
    }
}
4294 bc98a7ef j_mayer
4295 8df1cd07 bellard
/* warning: addr must be aligned */
4296 1e78bcc1 Alexander Graf
/* Store a 32-bit value at guest physical ADDR with the byte order requested
 * by ENDIAN.  warning: addr must be aligned.
 *
 * I/O regions receive the value (byte-swapped if needed) through the
 * memory-region callbacks; RAM stores go through the host mapping and then
 * invalidate any translated code on the page and set its dirty flags. */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        /* Writes to read-only RAM are routed to the ROM handler. */
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        /* io_mem_write expects target-native order; swap when the caller
         * requested the opposite endianness. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        /* Keep translated code coherent with the modified memory. */
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4345 8df1cd07 bellard
4346 1e78bcc1 Alexander Graf
/* Store a 32-bit value to guest physical memory in guest-native byte order. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    const enum device_endian order = DEVICE_NATIVE_ENDIAN;

    stl_phys_internal(addr, val, order);
}
4350 1e78bcc1 Alexander Graf
4351 1e78bcc1 Alexander Graf
/* Store a 32-bit value to guest physical memory, little-endian. */
void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    const enum device_endian order = DEVICE_LITTLE_ENDIAN;

    stl_phys_internal(addr, val, order);
}
4355 1e78bcc1 Alexander Graf
4356 1e78bcc1 Alexander Graf
/* Store a 32-bit value to guest physical memory, big-endian. */
void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    const enum device_endian order = DEVICE_BIG_ENDIAN;

    stl_phys_internal(addr, val, order);
}
4360 1e78bcc1 Alexander Graf
4361 aab33094 bellard
/* XXX: optimize */
4362 c227f099 Anthony Liguori
/* Store the low byte of VAL to guest physical memory at ADDR. */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t byte_val = (uint8_t)val;

    cpu_physical_memory_write(addr, &byte_val, 1);
}
4367 aab33094 bellard
4368 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
4369 1e78bcc1 Alexander Graf
/* Store a 16-bit value at guest physical ADDR with the byte order requested
 * by ENDIAN.  warning: addr must be aligned.
 *
 * Mirrors stl_phys_internal but for 2-byte accesses: I/O regions get the
 * (possibly byte-swapped) value via io_mem_write; RAM stores go through the
 * host mapping, then invalidate translated code and set dirty flags. */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        /* Writes to read-only RAM are routed to the ROM handler. */
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        /* Swap to target-native order when the requested endianness
         * differs. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        /* Keep translated code coherent with the modified memory. */
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4418 aab33094 bellard
4419 1e78bcc1 Alexander Graf
/* Store a 16-bit value to guest physical memory in guest-native byte order. */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    const enum device_endian order = DEVICE_NATIVE_ENDIAN;

    stw_phys_internal(addr, val, order);
}
4423 1e78bcc1 Alexander Graf
4424 1e78bcc1 Alexander Graf
/* Store a 16-bit value to guest physical memory, little-endian. */
void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    const enum device_endian order = DEVICE_LITTLE_ENDIAN;

    stw_phys_internal(addr, val, order);
}
4428 1e78bcc1 Alexander Graf
4429 1e78bcc1 Alexander Graf
/* Store a 16-bit value to guest physical memory, big-endian. */
void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    const enum device_endian order = DEVICE_BIG_ENDIAN;

    stw_phys_internal(addr, val, order);
}
4433 1e78bcc1 Alexander Graf
4434 aab33094 bellard
/* XXX: optimize */
4435 c227f099 Anthony Liguori
/* Store a 64-bit value to guest physical memory in guest-native byte
   order (tswap64 converts from host to target order). */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    uint64_t swapped = tswap64(val);

    cpu_physical_memory_write(addr, &swapped, 8);
}
4440 aab33094 bellard
4441 1e78bcc1 Alexander Graf
/* Store a 64-bit value to guest physical memory, little-endian. */
void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    uint64_t swapped = cpu_to_le64(val);

    cpu_physical_memory_write(addr, &swapped, 8);
}
4446 1e78bcc1 Alexander Graf
4447 1e78bcc1 Alexander Graf
/* Store a 64-bit value to guest physical memory, big-endian. */
void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    uint64_t swapped = cpu_to_be64(val);

    cpu_physical_memory_write(addr, &swapped, 8);
}
4452 1e78bcc1 Alexander Graf
4453 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
4454 9349b4f9 Andreas Färber
/* Virtual memory access for debug: translates ADDR page by page through the
   CPU's debug page walker and copies LEN bytes.  Writes go through the ROM
   path so read-only pages can still be patched (e.g. breakpoints).
   Returns 0 on success, -1 if a page has no physical mapping. */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    while (len > 0) {
        target_ulong page = addr & TARGET_PAGE_MASK;
        target_phys_addr_t phys_addr = cpu_get_phys_page_debug(env, page);
        int chunk;

        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        /* Clamp the transfer to the end of the current page. */
        chunk = (page + TARGET_PAGE_SIZE) - addr;
        if (chunk > len) {
            chunk = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, chunk);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, chunk, is_write);
        }
        len -= chunk;
        buf += chunk;
        addr += chunk;
    }
    return 0;
}
4481 a68fe89c Paul Brook
#endif
4482 13eb76e0 bellard
4483 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
4484 2e70f6ef pbrook
   must be at the end of the TB */
4485 20503968 Blue Swirl
/* In deterministic (icount) execution mode, instructions doing device I/O
 * must be at the end of the TB.  This is called from an I/O access at
 * host PC RETADDR to invalidate the current TB and regenerate it so that
 * it ends exactly on the I/O instruction, then restarts execution. */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Map the host return address back to the TB that was executing. */
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* Restore guest CPU state to the faulting instruction. */
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO forces the regenerated TB to stop at the I/O insn. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
4541 2e70f6ef pbrook
4542 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
4543 b3755a91 Paul Brook
4544 055403b2 Stefan Weil
/* Print translation-buffer statistics (TB sizes, cross-page TBs, direct
 * jump chaining, flush/invalidate counters) to F via CPU_FPRINTF, then
 * append the TCG backend's own statistics. */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* Aggregate per-TB statistics over all currently translated blocks. */
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* A second page address means the TB spans a page boundary. */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks an unpatched jump slot; anything else means the
         * TB has been chained directly to a successor. */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n", 
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    /* Let the TCG backend report its own counters. */
    tcg_dump_info(f, cpu_fprintf);
}
4595 e3db7226 bellard
4596 d39e8222 Avi Kivity
/* NOTE: this function can trigger an exception */
4597 d39e8222 Avi Kivity
/* NOTE2: the returned address is not exactly the physical address: it
4598 d39e8222 Avi Kivity
   is the offset relative to phys_ram_base */
4599 9349b4f9 Andreas Färber
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
/* Translate guest virtual ADDR to the ram_addr_t used for TB lookup.
 * Fills the code TLB entry (via a code load) if it is missing, then
 * rejects addresses that do not resolve to RAM/ROM-backed memory. */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    /* If the code TLB entry does not match, force a fill by doing a code
     * byte load (this may raise a guest fault). */
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
#ifdef CONFIG_TCG_PASS_AREG0
        cpu_ldub_code(env1, addr);
#else
        ldub_code(addr);
#endif
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(pd);
    /* Executing from anything other than RAM/ROM/notdirty/ROM-device/
     * watchpoint memory is not supported. */
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    /* Convert the guest address to a host pointer via the TLB addend,
     * then back to a ram_addr_t offset. */
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}
4629 d39e8222 Avi Kivity
4630 82afa586 Benjamin Herrenschmidt
/*
4631 82afa586 Benjamin Herrenschmidt
 * A helper function for the _utterly broken_ virtio device model to find out if
4632 82afa586 Benjamin Herrenschmidt
 * it's running on a big endian machine. Don't do this at home kids!
4633 82afa586 Benjamin Herrenschmidt
 */
4634 82afa586 Benjamin Herrenschmidt
/*
 * A helper for the _utterly broken_ virtio device model to find out whether
 * the target it was built for is big endian. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return true;
#else
    return false;
#endif
}
4643 82afa586 Benjamin Herrenschmidt
4644 61382a50 bellard
#define MMUSUFFIX _cmmu
4645 3917149d Blue Swirl
#undef GETPC
4646 20503968 Blue Swirl
#define GETPC() ((uintptr_t)0)
4647 61382a50 bellard
#define env cpu_single_env
4648 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
4649 61382a50 bellard
4650 61382a50 bellard
#define SHIFT 0
4651 61382a50 bellard
#include "softmmu_template.h"
4652 61382a50 bellard
4653 61382a50 bellard
#define SHIFT 1
4654 61382a50 bellard
#include "softmmu_template.h"
4655 61382a50 bellard
4656 61382a50 bellard
#define SHIFT 2
4657 61382a50 bellard
#include "softmmu_template.h"
4658 61382a50 bellard
4659 61382a50 bellard
#define SHIFT 3
4660 61382a50 bellard
#include "softmmu_template.h"
4661 61382a50 bellard
4662 61382a50 bellard
#undef env
4663 61382a50 bellard
4664 61382a50 bellard
#endif