Statistics
| Branch: | Revision:

root / exec.c @ f8d3d128

History | View | Annotate | Download (129.1 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
27 055403b2 Stefan Weil
#include "qemu-common.h"
28 6180a181 bellard
#include "cpu.h"
29 b67d9a52 bellard
#include "tcg.h"
30 b3c7724c pbrook
#include "hw/hw.h"
31 cc9e98cb Alex Williamson
#include "hw/qdev.h"
32 74576198 aliguori
#include "osdep.h"
33 7ba1e619 aliguori
#include "kvm.h"
34 432d268c Jun Nakajima
#include "hw/xen.h"
35 29e922b6 Blue Swirl
#include "qemu-timer.h"
36 62152b8a Avi Kivity
#include "memory.h"
37 62152b8a Avi Kivity
#include "exec-memory.h"
38 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
39 53a5960a pbrook
#include <qemu.h>
40 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 f01576f1 Juergen Lock
#include <sys/param.h>
42 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
43 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
44 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
45 f01576f1 Juergen Lock
#include <sys/time.h>
46 f01576f1 Juergen Lock
#include <sys/proc.h>
47 f01576f1 Juergen Lock
#include <machine/profile.h>
48 f01576f1 Juergen Lock
#define _KERNEL
49 f01576f1 Juergen Lock
#include <sys/user.h>
50 f01576f1 Juergen Lock
#undef _KERNEL
51 f01576f1 Juergen Lock
#undef sigqueue
52 f01576f1 Juergen Lock
#include <libutil.h>
53 f01576f1 Juergen Lock
#endif
54 f01576f1 Juergen Lock
#endif
55 432d268c Jun Nakajima
#else /* !CONFIG_USER_ONLY */
56 432d268c Jun Nakajima
#include "xen-mapcache.h"
57 6506e4f9 Stefano Stabellini
#include "trace.h"
58 53a5960a pbrook
#endif
59 54936004 bellard
60 67d95c15 Avi Kivity
#define WANT_EXEC_OBSOLETE
61 67d95c15 Avi Kivity
#include "exec-obsolete.h"
62 67d95c15 Avi Kivity
63 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
64 66e85a21 bellard
//#define DEBUG_FLUSH
65 9fa3e853 bellard
//#define DEBUG_TLB
66 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
67 fd6ce8f6 bellard
68 fd6ce8f6 bellard
/* make various TB consistency checks */
69 5fafdf24 ths
//#define DEBUG_TB_CHECK
70 5fafdf24 ths
//#define DEBUG_TLB_CHECK
71 fd6ce8f6 bellard
72 1196be37 ths
//#define DEBUG_IOPORT
73 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
74 1196be37 ths
75 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
76 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
77 99773bd4 pbrook
#undef DEBUG_TB_CHECK
78 99773bd4 pbrook
#endif
79 99773bd4 pbrook
80 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
81 9fa3e853 bellard
82 bdaf78e0 blueswir1
static TranslationBlock *tbs;
83 24ab68ac Stefan Weil
static int code_gen_max_blocks;
84 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 bdaf78e0 blueswir1
static int nb_tbs;
86 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
87 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88 fd6ce8f6 bellard
89 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
90 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
92 d03d860b blueswir1
 section close to code segment. */
93 d03d860b blueswir1
#define code_gen_section                                \
94 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
95 d03d860b blueswir1
    __attribute__((aligned (32)))
96 f8e2af11 Stefan Weil
#elif defined(_WIN32)
97 f8e2af11 Stefan Weil
/* Maximum alignment for Win32 is 16. */
98 f8e2af11 Stefan Weil
#define code_gen_section                                \
99 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
100 d03d860b blueswir1
#else
101 d03d860b blueswir1
#define code_gen_section                                \
102 d03d860b blueswir1
    __attribute__((aligned (32)))
103 d03d860b blueswir1
#endif
104 d03d860b blueswir1
105 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
106 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
107 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
108 26a5f13b bellard
/* threshold to flush the translated code buffer */
109 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
110 24ab68ac Stefan Weil
static uint8_t *code_gen_ptr;
111 fd6ce8f6 bellard
112 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
113 9fa3e853 bellard
int phys_ram_fd;
114 74576198 aliguori
static int in_migration;
115 94a6b54f pbrook
116 85d59fef Paolo Bonzini
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
117 62152b8a Avi Kivity
118 62152b8a Avi Kivity
static MemoryRegion *system_memory;
119 309cb471 Avi Kivity
static MemoryRegion *system_io;
120 62152b8a Avi Kivity
121 0e0df1e2 Avi Kivity
MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
122 de712f94 Avi Kivity
static MemoryRegion io_mem_subpage_ram;
123 0e0df1e2 Avi Kivity
124 e2eef170 pbrook
#endif
125 9fa3e853 bellard
126 6a00d601 bellard
CPUState *first_cpu;
127 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
128 6a00d601 bellard
   cpu_exec() */
129 b3c4bbe5 Paolo Bonzini
DEFINE_TLS(CPUState *,cpu_single_env);
130 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
131 bf20dc07 ths
   1 = Precise instruction counting.
132 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
133 2e70f6ef pbrook
int use_icount = 0;
134 6a00d601 bellard
135 54936004 bellard
typedef struct PageDesc {
136 92e873b9 bellard
    /* list of TBs intersecting this ram page */
137 fd6ce8f6 bellard
    TranslationBlock *first_tb;
138 9fa3e853 bellard
    /* in order to optimize self modifying code, we count the number
139 9fa3e853 bellard
       of lookups we do to a given page to use a bitmap */
140 9fa3e853 bellard
    unsigned int code_write_count;
141 9fa3e853 bellard
    uint8_t *code_bitmap;
142 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
143 9fa3e853 bellard
    unsigned long flags;
144 9fa3e853 bellard
#endif
145 54936004 bellard
} PageDesc;
146 54936004 bellard
147 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
148 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
149 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
150 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
152 41c1b1c9 Paul Brook
#else
153 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
154 41c1b1c9 Paul Brook
#endif
155 bedb69ea j_mayer
#else
156 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
157 bedb69ea j_mayer
#endif
158 54936004 bellard
159 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
160 5cd2c5b6 Richard Henderson
#define L2_BITS 10
161 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
162 54936004 bellard
163 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
164 5cd2c5b6 Richard Henderson
#define P_L1_BITS_REM \
165 5cd2c5b6 Richard Henderson
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
166 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
167 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168 5cd2c5b6 Richard Henderson
169 5cd2c5b6 Richard Henderson
/* Size of the L1 page table.  Avoid silly small sizes.  */
170 5cd2c5b6 Richard Henderson
#if P_L1_BITS_REM < 4
171 5cd2c5b6 Richard Henderson
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
172 5cd2c5b6 Richard Henderson
#else
173 5cd2c5b6 Richard Henderson
#define P_L1_BITS  P_L1_BITS_REM
174 5cd2c5b6 Richard Henderson
#endif
175 5cd2c5b6 Richard Henderson
176 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
177 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
178 5cd2c5b6 Richard Henderson
#else
179 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
180 5cd2c5b6 Richard Henderson
#endif
181 5cd2c5b6 Richard Henderson
182 5cd2c5b6 Richard Henderson
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
183 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
184 5cd2c5b6 Richard Henderson
185 5cd2c5b6 Richard Henderson
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
186 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
187 5cd2c5b6 Richard Henderson
188 83fb7adf bellard
unsigned long qemu_real_host_page_size;
189 83fb7adf bellard
unsigned long qemu_host_page_size;
190 83fb7adf bellard
unsigned long qemu_host_page_mask;
191 54936004 bellard
192 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
193 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
194 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
195 54936004 bellard
196 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
197 41c1b1c9 Paul Brook
typedef struct PhysPageDesc {
198 41c1b1c9 Paul Brook
    /* offset in host memory of the page + io_index in the low bits */
199 41c1b1c9 Paul Brook
    ram_addr_t phys_offset;
200 41c1b1c9 Paul Brook
    ram_addr_t region_offset;
201 41c1b1c9 Paul Brook
} PhysPageDesc;
202 41c1b1c9 Paul Brook
203 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
204 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
205 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
206 6d9a1304 Paul Brook
207 e2eef170 pbrook
static void io_mem_init(void);
208 62152b8a Avi Kivity
static void memory_map_init(void);
209 e2eef170 pbrook
210 33417e70 bellard
/* io memory support */
211 a621f38d Avi Kivity
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
212 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
213 1ec9b909 Avi Kivity
static MemoryRegion io_mem_watch;
214 6658ffb8 pbrook
#endif
215 33417e70 bellard
216 34865134 bellard
/* log support */
217 1e8b27ca Juha Riihimรคki
#ifdef WIN32
218 1e8b27ca Juha Riihimรคki
static const char *logfilename = "qemu.log";
219 1e8b27ca Juha Riihimรคki
#else
220 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
221 1e8b27ca Juha Riihimรคki
#endif
222 34865134 bellard
FILE *logfile;
223 34865134 bellard
int loglevel;
224 e735b91c pbrook
static int log_append = 0;
225 34865134 bellard
226 e3db7226 bellard
/* statistics */
227 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
228 e3db7226 bellard
static int tlb_flush_count;
229 b3755a91 Paul Brook
#endif
230 e3db7226 bellard
static int tb_flush_count;
231 e3db7226 bellard
static int tb_phys_invalidate_count;
232 e3db7226 bellard
233 7cb69cae bellard
#ifdef _WIN32
234 7cb69cae bellard
static void map_exec(void *addr, long size)
235 7cb69cae bellard
{
236 7cb69cae bellard
    DWORD old_protect;
237 7cb69cae bellard
    VirtualProtect(addr, size,
238 7cb69cae bellard
                   PAGE_EXECUTE_READWRITE, &old_protect);
239 7cb69cae bellard
    
240 7cb69cae bellard
}
241 7cb69cae bellard
#else
242 7cb69cae bellard
static void map_exec(void *addr, long size)
243 7cb69cae bellard
{
244 4369415f bellard
    unsigned long start, end, page_size;
245 7cb69cae bellard
    
246 4369415f bellard
    page_size = getpagesize();
247 7cb69cae bellard
    start = (unsigned long)addr;
248 4369415f bellard
    start &= ~(page_size - 1);
249 7cb69cae bellard
    
250 7cb69cae bellard
    end = (unsigned long)addr + size;
251 4369415f bellard
    end += page_size - 1;
252 4369415f bellard
    end &= ~(page_size - 1);
253 7cb69cae bellard
    
254 7cb69cae bellard
    mprotect((void *)start, end - start,
255 7cb69cae bellard
             PROT_READ | PROT_WRITE | PROT_EXEC);
256 7cb69cae bellard
}
257 7cb69cae bellard
#endif
258 7cb69cae bellard
259 b346ff46 bellard
static void page_init(void)
260 54936004 bellard
{
261 83fb7adf bellard
    /* NOTE: we can always suppose that qemu_host_page_size >=
262 54936004 bellard
       TARGET_PAGE_SIZE */
263 c2b48b69 aliguori
#ifdef _WIN32
264 c2b48b69 aliguori
    {
265 c2b48b69 aliguori
        SYSTEM_INFO system_info;
266 c2b48b69 aliguori
267 c2b48b69 aliguori
        GetSystemInfo(&system_info);
268 c2b48b69 aliguori
        qemu_real_host_page_size = system_info.dwPageSize;
269 c2b48b69 aliguori
    }
270 c2b48b69 aliguori
#else
271 c2b48b69 aliguori
    qemu_real_host_page_size = getpagesize();
272 c2b48b69 aliguori
#endif
273 83fb7adf bellard
    if (qemu_host_page_size == 0)
274 83fb7adf bellard
        qemu_host_page_size = qemu_real_host_page_size;
275 83fb7adf bellard
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
276 83fb7adf bellard
        qemu_host_page_size = TARGET_PAGE_SIZE;
277 83fb7adf bellard
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
278 50a9569b balrog
279 2e9a5713 Paul Brook
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
280 50a9569b balrog
    {
281 f01576f1 Juergen Lock
#ifdef HAVE_KINFO_GETVMMAP
282 f01576f1 Juergen Lock
        struct kinfo_vmentry *freep;
283 f01576f1 Juergen Lock
        int i, cnt;
284 f01576f1 Juergen Lock
285 f01576f1 Juergen Lock
        freep = kinfo_getvmmap(getpid(), &cnt);
286 f01576f1 Juergen Lock
        if (freep) {
287 f01576f1 Juergen Lock
            mmap_lock();
288 f01576f1 Juergen Lock
            for (i = 0; i < cnt; i++) {
289 f01576f1 Juergen Lock
                unsigned long startaddr, endaddr;
290 f01576f1 Juergen Lock
291 f01576f1 Juergen Lock
                startaddr = freep[i].kve_start;
292 f01576f1 Juergen Lock
                endaddr = freep[i].kve_end;
293 f01576f1 Juergen Lock
                if (h2g_valid(startaddr)) {
294 f01576f1 Juergen Lock
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
295 f01576f1 Juergen Lock
296 f01576f1 Juergen Lock
                    if (h2g_valid(endaddr)) {
297 f01576f1 Juergen Lock
                        endaddr = h2g(endaddr);
298 fd436907 Aurelien Jarno
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
299 f01576f1 Juergen Lock
                    } else {
300 f01576f1 Juergen Lock
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
301 f01576f1 Juergen Lock
                        endaddr = ~0ul;
302 fd436907 Aurelien Jarno
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
303 f01576f1 Juergen Lock
#endif
304 f01576f1 Juergen Lock
                    }
305 f01576f1 Juergen Lock
                }
306 f01576f1 Juergen Lock
            }
307 f01576f1 Juergen Lock
            free(freep);
308 f01576f1 Juergen Lock
            mmap_unlock();
309 f01576f1 Juergen Lock
        }
310 f01576f1 Juergen Lock
#else
311 50a9569b balrog
        FILE *f;
312 50a9569b balrog
313 0776590d pbrook
        last_brk = (unsigned long)sbrk(0);
314 5cd2c5b6 Richard Henderson
315 fd436907 Aurelien Jarno
        f = fopen("/compat/linux/proc/self/maps", "r");
316 50a9569b balrog
        if (f) {
317 5cd2c5b6 Richard Henderson
            mmap_lock();
318 5cd2c5b6 Richard Henderson
319 50a9569b balrog
            do {
320 5cd2c5b6 Richard Henderson
                unsigned long startaddr, endaddr;
321 5cd2c5b6 Richard Henderson
                int n;
322 5cd2c5b6 Richard Henderson
323 5cd2c5b6 Richard Henderson
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
324 5cd2c5b6 Richard Henderson
325 5cd2c5b6 Richard Henderson
                if (n == 2 && h2g_valid(startaddr)) {
326 5cd2c5b6 Richard Henderson
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
327 5cd2c5b6 Richard Henderson
328 5cd2c5b6 Richard Henderson
                    if (h2g_valid(endaddr)) {
329 5cd2c5b6 Richard Henderson
                        endaddr = h2g(endaddr);
330 5cd2c5b6 Richard Henderson
                    } else {
331 5cd2c5b6 Richard Henderson
                        endaddr = ~0ul;
332 5cd2c5b6 Richard Henderson
                    }
333 5cd2c5b6 Richard Henderson
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
334 50a9569b balrog
                }
335 50a9569b balrog
            } while (!feof(f));
336 5cd2c5b6 Richard Henderson
337 50a9569b balrog
            fclose(f);
338 5cd2c5b6 Richard Henderson
            mmap_unlock();
339 50a9569b balrog
        }
340 f01576f1 Juergen Lock
#endif
341 50a9569b balrog
    }
342 50a9569b balrog
#endif
343 54936004 bellard
}
344 54936004 bellard
345 41c1b1c9 Paul Brook
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
346 54936004 bellard
{
347 41c1b1c9 Paul Brook
    PageDesc *pd;
348 41c1b1c9 Paul Brook
    void **lp;
349 41c1b1c9 Paul Brook
    int i;
350 41c1b1c9 Paul Brook
351 5cd2c5b6 Richard Henderson
#if defined(CONFIG_USER_ONLY)
352 7267c094 Anthony Liguori
    /* We can't use g_malloc because it may recurse into a locked mutex. */
353 5cd2c5b6 Richard Henderson
# define ALLOC(P, SIZE)                                 \
354 5cd2c5b6 Richard Henderson
    do {                                                \
355 5cd2c5b6 Richard Henderson
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
356 5cd2c5b6 Richard Henderson
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
357 5cd2c5b6 Richard Henderson
    } while (0)
358 5cd2c5b6 Richard Henderson
#else
359 5cd2c5b6 Richard Henderson
# define ALLOC(P, SIZE) \
360 7267c094 Anthony Liguori
    do { P = g_malloc0(SIZE); } while (0)
361 17e2377a pbrook
#endif
362 434929bf aliguori
363 5cd2c5b6 Richard Henderson
    /* Level 1.  Always allocated.  */
364 5cd2c5b6 Richard Henderson
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
365 5cd2c5b6 Richard Henderson
366 5cd2c5b6 Richard Henderson
    /* Level 2..N-1.  */
367 5cd2c5b6 Richard Henderson
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
368 5cd2c5b6 Richard Henderson
        void **p = *lp;
369 5cd2c5b6 Richard Henderson
370 5cd2c5b6 Richard Henderson
        if (p == NULL) {
371 5cd2c5b6 Richard Henderson
            if (!alloc) {
372 5cd2c5b6 Richard Henderson
                return NULL;
373 5cd2c5b6 Richard Henderson
            }
374 5cd2c5b6 Richard Henderson
            ALLOC(p, sizeof(void *) * L2_SIZE);
375 5cd2c5b6 Richard Henderson
            *lp = p;
376 17e2377a pbrook
        }
377 5cd2c5b6 Richard Henderson
378 5cd2c5b6 Richard Henderson
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
379 5cd2c5b6 Richard Henderson
    }
380 5cd2c5b6 Richard Henderson
381 5cd2c5b6 Richard Henderson
    pd = *lp;
382 5cd2c5b6 Richard Henderson
    if (pd == NULL) {
383 5cd2c5b6 Richard Henderson
        if (!alloc) {
384 5cd2c5b6 Richard Henderson
            return NULL;
385 5cd2c5b6 Richard Henderson
        }
386 5cd2c5b6 Richard Henderson
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
387 5cd2c5b6 Richard Henderson
        *lp = pd;
388 54936004 bellard
    }
389 5cd2c5b6 Richard Henderson
390 5cd2c5b6 Richard Henderson
#undef ALLOC
391 5cd2c5b6 Richard Henderson
392 5cd2c5b6 Richard Henderson
    return pd + (index & (L2_SIZE - 1));
393 54936004 bellard
}
394 54936004 bellard
395 41c1b1c9 Paul Brook
static inline PageDesc *page_find(tb_page_addr_t index)
396 54936004 bellard
{
397 5cd2c5b6 Richard Henderson
    return page_find_alloc(index, 0);
398 fd6ce8f6 bellard
}
399 fd6ce8f6 bellard
400 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
401 c227f099 Anthony Liguori
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
402 92e873b9 bellard
{
403 e3f4e2a4 pbrook
    PhysPageDesc *pd;
404 5cd2c5b6 Richard Henderson
    void **lp;
405 5cd2c5b6 Richard Henderson
    int i;
406 92e873b9 bellard
407 5cd2c5b6 Richard Henderson
    /* Level 1.  Always allocated.  */
408 5cd2c5b6 Richard Henderson
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
409 108c49b8 bellard
410 5cd2c5b6 Richard Henderson
    /* Level 2..N-1.  */
411 5cd2c5b6 Richard Henderson
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
412 5cd2c5b6 Richard Henderson
        void **p = *lp;
413 5cd2c5b6 Richard Henderson
        if (p == NULL) {
414 5cd2c5b6 Richard Henderson
            if (!alloc) {
415 5cd2c5b6 Richard Henderson
                return NULL;
416 5cd2c5b6 Richard Henderson
            }
417 7267c094 Anthony Liguori
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
418 5cd2c5b6 Richard Henderson
        }
419 5cd2c5b6 Richard Henderson
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
420 108c49b8 bellard
    }
421 5cd2c5b6 Richard Henderson
422 e3f4e2a4 pbrook
    pd = *lp;
423 5cd2c5b6 Richard Henderson
    if (pd == NULL) {
424 e3f4e2a4 pbrook
        int i;
425 5ab97b7f Alex Rozenman
        int first_index = index & ~(L2_SIZE - 1);
426 5cd2c5b6 Richard Henderson
427 5cd2c5b6 Richard Henderson
        if (!alloc) {
428 108c49b8 bellard
            return NULL;
429 5cd2c5b6 Richard Henderson
        }
430 5cd2c5b6 Richard Henderson
431 7267c094 Anthony Liguori
        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
432 5cd2c5b6 Richard Henderson
433 67c4d23c pbrook
        for (i = 0; i < L2_SIZE; i++) {
434 0e0df1e2 Avi Kivity
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
435 5ab97b7f Alex Rozenman
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
436 67c4d23c pbrook
        }
437 92e873b9 bellard
    }
438 5cd2c5b6 Richard Henderson
439 5cd2c5b6 Richard Henderson
    return pd + (index & (L2_SIZE - 1));
440 92e873b9 bellard
}
441 92e873b9 bellard
442 f1f6e3b8 Avi Kivity
static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
443 92e873b9 bellard
{
444 f1f6e3b8 Avi Kivity
    PhysPageDesc *p = phys_page_find_alloc(index, 0);
445 f1f6e3b8 Avi Kivity
446 f1f6e3b8 Avi Kivity
    if (p) {
447 f1f6e3b8 Avi Kivity
        return *p;
448 f1f6e3b8 Avi Kivity
    } else {
449 f1f6e3b8 Avi Kivity
        return (PhysPageDesc) {
450 0e0df1e2 Avi Kivity
            .phys_offset = io_mem_unassigned.ram_addr,
451 f1f6e3b8 Avi Kivity
            .region_offset = index << TARGET_PAGE_BITS,
452 f1f6e3b8 Avi Kivity
        };
453 f1f6e3b8 Avi Kivity
    }
454 92e873b9 bellard
}
455 92e873b9 bellard
456 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
457 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
458 3a7d929e bellard
                                    target_ulong vaddr);
459 c8a706fe pbrook
#define mmap_lock() do { } while(0)
460 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
461 9fa3e853 bellard
#endif
462 fd6ce8f6 bellard
463 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
464 4369415f bellard
465 4369415f bellard
#if defined(CONFIG_USER_ONLY)
466 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
467 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
468 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
469 4369415f bellard
#endif
470 4369415f bellard
471 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
472 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
473 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
474 4369415f bellard
#endif
475 4369415f bellard
476 8fcd3692 blueswir1
static void code_gen_alloc(unsigned long tb_size)
477 26a5f13b bellard
{
478 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
479 4369415f bellard
    code_gen_buffer = static_code_gen_buffer;
480 4369415f bellard
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
481 4369415f bellard
    map_exec(code_gen_buffer, code_gen_buffer_size);
482 4369415f bellard
#else
483 26a5f13b bellard
    code_gen_buffer_size = tb_size;
484 26a5f13b bellard
    if (code_gen_buffer_size == 0) {
485 4369415f bellard
#if defined(CONFIG_USER_ONLY)
486 4369415f bellard
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
487 4369415f bellard
#else
488 ccbb4d44 Stuart Brady
        /* XXX: needs adjustments */
489 94a6b54f pbrook
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
490 4369415f bellard
#endif
491 26a5f13b bellard
    }
492 26a5f13b bellard
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
493 26a5f13b bellard
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
494 26a5f13b bellard
    /* The code gen buffer location may have constraints depending on
495 26a5f13b bellard
       the host cpu and OS */
496 26a5f13b bellard
#if defined(__linux__) 
497 26a5f13b bellard
    {
498 26a5f13b bellard
        int flags;
499 141ac468 blueswir1
        void *start = NULL;
500 141ac468 blueswir1
501 26a5f13b bellard
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
502 26a5f13b bellard
#if defined(__x86_64__)
503 26a5f13b bellard
        flags |= MAP_32BIT;
504 26a5f13b bellard
        /* Cannot map more than that */
505 26a5f13b bellard
        if (code_gen_buffer_size > (800 * 1024 * 1024))
506 26a5f13b bellard
            code_gen_buffer_size = (800 * 1024 * 1024);
507 141ac468 blueswir1
#elif defined(__sparc_v9__)
508 141ac468 blueswir1
        // Map the buffer below 2G, so we can use direct calls and branches
509 141ac468 blueswir1
        flags |= MAP_FIXED;
510 141ac468 blueswir1
        start = (void *) 0x60000000UL;
511 141ac468 blueswir1
        if (code_gen_buffer_size > (512 * 1024 * 1024))
512 141ac468 blueswir1
            code_gen_buffer_size = (512 * 1024 * 1024);
513 1cb0661e balrog
#elif defined(__arm__)
514 5c84bd90 Aurelien Jarno
        /* Keep the buffer no bigger than 16MB to branch between blocks */
515 1cb0661e balrog
        if (code_gen_buffer_size > 16 * 1024 * 1024)
516 1cb0661e balrog
            code_gen_buffer_size = 16 * 1024 * 1024;
517 eba0b893 Richard Henderson
#elif defined(__s390x__)
518 eba0b893 Richard Henderson
        /* Map the buffer so that we can use direct calls and branches.  */
519 eba0b893 Richard Henderson
        /* We have a +- 4GB range on the branches; leave some slop.  */
520 eba0b893 Richard Henderson
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
521 eba0b893 Richard Henderson
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
522 eba0b893 Richard Henderson
        }
523 eba0b893 Richard Henderson
        start = (void *)0x90000000UL;
524 26a5f13b bellard
#endif
525 141ac468 blueswir1
        code_gen_buffer = mmap(start, code_gen_buffer_size,
526 141ac468 blueswir1
                               PROT_WRITE | PROT_READ | PROT_EXEC,
527 26a5f13b bellard
                               flags, -1, 0);
528 26a5f13b bellard
        if (code_gen_buffer == MAP_FAILED) {
529 26a5f13b bellard
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
530 26a5f13b bellard
            exit(1);
531 26a5f13b bellard
        }
532 26a5f13b bellard
    }
533 cbb608a5 Brad
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
534 9f4b09a4 Tobias Nygren
    || defined(__DragonFly__) || defined(__OpenBSD__) \
535 9f4b09a4 Tobias Nygren
    || defined(__NetBSD__)
536 06e67a82 aliguori
    {
537 06e67a82 aliguori
        int flags;
538 06e67a82 aliguori
        void *addr = NULL;
539 06e67a82 aliguori
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
540 06e67a82 aliguori
#if defined(__x86_64__)
541 06e67a82 aliguori
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
542 06e67a82 aliguori
         * 0x40000000 is free */
543 06e67a82 aliguori
        flags |= MAP_FIXED;
544 06e67a82 aliguori
        addr = (void *)0x40000000;
545 06e67a82 aliguori
        /* Cannot map more than that */
546 06e67a82 aliguori
        if (code_gen_buffer_size > (800 * 1024 * 1024))
547 06e67a82 aliguori
            code_gen_buffer_size = (800 * 1024 * 1024);
548 4cd31ad2 Blue Swirl
#elif defined(__sparc_v9__)
549 4cd31ad2 Blue Swirl
        // Map the buffer below 2G, so we can use direct calls and branches
550 4cd31ad2 Blue Swirl
        flags |= MAP_FIXED;
551 4cd31ad2 Blue Swirl
        addr = (void *) 0x60000000UL;
552 4cd31ad2 Blue Swirl
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
553 4cd31ad2 Blue Swirl
            code_gen_buffer_size = (512 * 1024 * 1024);
554 4cd31ad2 Blue Swirl
        }
555 06e67a82 aliguori
#endif
556 06e67a82 aliguori
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
557 06e67a82 aliguori
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
558 06e67a82 aliguori
                               flags, -1, 0);
559 06e67a82 aliguori
        if (code_gen_buffer == MAP_FAILED) {
560 06e67a82 aliguori
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
561 06e67a82 aliguori
            exit(1);
562 06e67a82 aliguori
        }
563 06e67a82 aliguori
    }
564 26a5f13b bellard
#else
565 7267c094 Anthony Liguori
    code_gen_buffer = g_malloc(code_gen_buffer_size);
566 26a5f13b bellard
    map_exec(code_gen_buffer, code_gen_buffer_size);
567 26a5f13b bellard
#endif
568 4369415f bellard
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
569 26a5f13b bellard
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
570 a884da8a Peter Maydell
    code_gen_buffer_max_size = code_gen_buffer_size -
571 a884da8a Peter Maydell
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
572 26a5f13b bellard
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
573 7267c094 Anthony Liguori
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
574 26a5f13b bellard
}
575 26a5f13b bellard
576 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
577 26a5f13b bellard
   (in bytes) allocated to the translation buffer. Zero means default
578 26a5f13b bellard
   size. */
579 d5ab9713 Jan Kiszka
void tcg_exec_init(unsigned long tb_size)
580 26a5f13b bellard
{
581 26a5f13b bellard
    cpu_gen_init();
582 26a5f13b bellard
    code_gen_alloc(tb_size);
583 26a5f13b bellard
    code_gen_ptr = code_gen_buffer;
584 4369415f bellard
    page_init();
585 9002ec79 Richard Henderson
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
586 9002ec79 Richard Henderson
    /* There's no guest base to take into account, so go ahead and
587 9002ec79 Richard Henderson
       initialize the prologue now.  */
588 9002ec79 Richard Henderson
    tcg_prologue_init(&tcg_ctx);
589 9002ec79 Richard Henderson
#endif
590 26a5f13b bellard
}
591 26a5f13b bellard
592 d5ab9713 Jan Kiszka
bool tcg_enabled(void)
593 d5ab9713 Jan Kiszka
{
594 d5ab9713 Jan Kiszka
    return code_gen_buffer != NULL;
595 d5ab9713 Jan Kiszka
}
596 d5ab9713 Jan Kiszka
597 d5ab9713 Jan Kiszka
void cpu_exec_init_all(void)
598 d5ab9713 Jan Kiszka
{
599 d5ab9713 Jan Kiszka
#if !defined(CONFIG_USER_ONLY)
600 d5ab9713 Jan Kiszka
    memory_map_init();
601 d5ab9713 Jan Kiszka
    io_mem_init();
602 d5ab9713 Jan Kiszka
#endif
603 d5ab9713 Jan Kiszka
}
604 d5ab9713 Jan Kiszka
605 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
606 9656f324 pbrook
607 e59fb374 Juan Quintela
static int cpu_common_post_load(void *opaque, int version_id)
608 e7f4eff7 Juan Quintela
{
609 e7f4eff7 Juan Quintela
    CPUState *env = opaque;
610 9656f324 pbrook
611 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
612 3098dba0 aurel32
       version_id is increased. */
613 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
614 9656f324 pbrook
    tlb_flush(env, 1);
615 9656f324 pbrook
616 9656f324 pbrook
    return 0;
617 9656f324 pbrook
}
618 e7f4eff7 Juan Quintela
619 e7f4eff7 Juan Quintela
static const VMStateDescription vmstate_cpu_common = {
620 e7f4eff7 Juan Quintela
    .name = "cpu_common",
621 e7f4eff7 Juan Quintela
    .version_id = 1,
622 e7f4eff7 Juan Quintela
    .minimum_version_id = 1,
623 e7f4eff7 Juan Quintela
    .minimum_version_id_old = 1,
624 e7f4eff7 Juan Quintela
    .post_load = cpu_common_post_load,
625 e7f4eff7 Juan Quintela
    .fields      = (VMStateField []) {
626 e7f4eff7 Juan Quintela
        VMSTATE_UINT32(halted, CPUState),
627 e7f4eff7 Juan Quintela
        VMSTATE_UINT32(interrupt_request, CPUState),
628 e7f4eff7 Juan Quintela
        VMSTATE_END_OF_LIST()
629 e7f4eff7 Juan Quintela
    }
630 e7f4eff7 Juan Quintela
};
631 9656f324 pbrook
#endif
632 9656f324 pbrook
633 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
634 950f1472 Glauber Costa
{
635 950f1472 Glauber Costa
    CPUState *env = first_cpu;
636 950f1472 Glauber Costa
637 950f1472 Glauber Costa
    while (env) {
638 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
639 950f1472 Glauber Costa
            break;
640 950f1472 Glauber Costa
        env = env->next_cpu;
641 950f1472 Glauber Costa
    }
642 950f1472 Glauber Costa
643 950f1472 Glauber Costa
    return env;
644 950f1472 Glauber Costa
}
645 950f1472 Glauber Costa
646 6a00d601 bellard
void cpu_exec_init(CPUState *env)
647 fd6ce8f6 bellard
{
648 6a00d601 bellard
    CPUState **penv;
649 6a00d601 bellard
    int cpu_index;
650 6a00d601 bellard
651 c2764719 pbrook
#if defined(CONFIG_USER_ONLY)
652 c2764719 pbrook
    cpu_list_lock();
653 c2764719 pbrook
#endif
654 6a00d601 bellard
    env->next_cpu = NULL;
655 6a00d601 bellard
    penv = &first_cpu;
656 6a00d601 bellard
    cpu_index = 0;
657 6a00d601 bellard
    while (*penv != NULL) {
658 1e9fa730 Nathan Froyd
        penv = &(*penv)->next_cpu;
659 6a00d601 bellard
        cpu_index++;
660 6a00d601 bellard
    }
661 6a00d601 bellard
    env->cpu_index = cpu_index;
662 268a362c aliguori
    env->numa_node = 0;
663 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->breakpoints);
664 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->watchpoints);
665 dc7a09cf Jan Kiszka
#ifndef CONFIG_USER_ONLY
666 dc7a09cf Jan Kiszka
    env->thread_id = qemu_get_thread_id();
667 dc7a09cf Jan Kiszka
#endif
668 6a00d601 bellard
    *penv = env;
669 c2764719 pbrook
#if defined(CONFIG_USER_ONLY)
670 c2764719 pbrook
    cpu_list_unlock();
671 c2764719 pbrook
#endif
672 b3c7724c pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
673 0be71e32 Alex Williamson
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
674 0be71e32 Alex Williamson
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
675 b3c7724c pbrook
                    cpu_save, cpu_load, env);
676 b3c7724c pbrook
#endif
677 fd6ce8f6 bellard
}
678 fd6ce8f6 bellard
679 d1a1eb74 Tristan Gingold
/* Allocate a new translation block. Flush the translation buffer if
680 d1a1eb74 Tristan Gingold
   too many translation blocks or too much generated code. */
681 d1a1eb74 Tristan Gingold
static TranslationBlock *tb_alloc(target_ulong pc)
682 d1a1eb74 Tristan Gingold
{
683 d1a1eb74 Tristan Gingold
    TranslationBlock *tb;
684 d1a1eb74 Tristan Gingold
685 d1a1eb74 Tristan Gingold
    if (nb_tbs >= code_gen_max_blocks ||
686 d1a1eb74 Tristan Gingold
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
687 d1a1eb74 Tristan Gingold
        return NULL;
688 d1a1eb74 Tristan Gingold
    tb = &tbs[nb_tbs++];
689 d1a1eb74 Tristan Gingold
    tb->pc = pc;
690 d1a1eb74 Tristan Gingold
    tb->cflags = 0;
691 d1a1eb74 Tristan Gingold
    return tb;
692 d1a1eb74 Tristan Gingold
}
693 d1a1eb74 Tristan Gingold
694 d1a1eb74 Tristan Gingold
void tb_free(TranslationBlock *tb)
695 d1a1eb74 Tristan Gingold
{
696 d1a1eb74 Tristan Gingold
    /* In practice this is mostly used for single use temporary TB
697 d1a1eb74 Tristan Gingold
       Ignore the hard cases and just back up if this TB happens to
698 d1a1eb74 Tristan Gingold
       be the last one generated.  */
699 d1a1eb74 Tristan Gingold
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
700 d1a1eb74 Tristan Gingold
        code_gen_ptr = tb->tc_ptr;
701 d1a1eb74 Tristan Gingold
        nb_tbs--;
702 d1a1eb74 Tristan Gingold
    }
703 d1a1eb74 Tristan Gingold
}
704 d1a1eb74 Tristan Gingold
705 9fa3e853 bellard
static inline void invalidate_page_bitmap(PageDesc *p)
706 9fa3e853 bellard
{
707 9fa3e853 bellard
    if (p->code_bitmap) {
708 7267c094 Anthony Liguori
        g_free(p->code_bitmap);
709 9fa3e853 bellard
        p->code_bitmap = NULL;
710 9fa3e853 bellard
    }
711 9fa3e853 bellard
    p->code_write_count = 0;
712 9fa3e853 bellard
}
713 9fa3e853 bellard
714 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
715 5cd2c5b6 Richard Henderson
716 5cd2c5b6 Richard Henderson
static void page_flush_tb_1 (int level, void **lp)
717 fd6ce8f6 bellard
{
718 5cd2c5b6 Richard Henderson
    int i;
719 fd6ce8f6 bellard
720 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
721 5cd2c5b6 Richard Henderson
        return;
722 5cd2c5b6 Richard Henderson
    }
723 5cd2c5b6 Richard Henderson
    if (level == 0) {
724 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
725 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
726 5cd2c5b6 Richard Henderson
            pd[i].first_tb = NULL;
727 5cd2c5b6 Richard Henderson
            invalidate_page_bitmap(pd + i);
728 fd6ce8f6 bellard
        }
729 5cd2c5b6 Richard Henderson
    } else {
730 5cd2c5b6 Richard Henderson
        void **pp = *lp;
731 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
732 5cd2c5b6 Richard Henderson
            page_flush_tb_1 (level - 1, pp + i);
733 5cd2c5b6 Richard Henderson
        }
734 5cd2c5b6 Richard Henderson
    }
735 5cd2c5b6 Richard Henderson
}
736 5cd2c5b6 Richard Henderson
737 5cd2c5b6 Richard Henderson
static void page_flush_tb(void)
738 5cd2c5b6 Richard Henderson
{
739 5cd2c5b6 Richard Henderson
    int i;
740 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
741 5cd2c5b6 Richard Henderson
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
742 fd6ce8f6 bellard
    }
743 fd6ce8f6 bellard
}
744 fd6ce8f6 bellard
745 fd6ce8f6 bellard
/* flush all the translation blocks */
746 d4e8164f bellard
/* XXX: tb_flush is currently not thread safe */
747 6a00d601 bellard
void tb_flush(CPUState *env1)
748 fd6ce8f6 bellard
{
749 6a00d601 bellard
    CPUState *env;
750 0124311e bellard
#if defined(DEBUG_FLUSH)
751 ab3d1727 blueswir1
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
752 ab3d1727 blueswir1
           (unsigned long)(code_gen_ptr - code_gen_buffer),
753 ab3d1727 blueswir1
           nb_tbs, nb_tbs > 0 ?
754 ab3d1727 blueswir1
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
755 fd6ce8f6 bellard
#endif
756 26a5f13b bellard
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
757 a208e54a pbrook
        cpu_abort(env1, "Internal error: code buffer overflow\n");
758 a208e54a pbrook
759 fd6ce8f6 bellard
    nb_tbs = 0;
760 3b46e624 ths
761 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
762 6a00d601 bellard
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
763 6a00d601 bellard
    }
764 9fa3e853 bellard
765 8a8a608f bellard
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
766 fd6ce8f6 bellard
    page_flush_tb();
767 9fa3e853 bellard
768 fd6ce8f6 bellard
    code_gen_ptr = code_gen_buffer;
769 d4e8164f bellard
    /* XXX: flush processor icache at this point if cache flush is
770 d4e8164f bellard
       expensive */
771 e3db7226 bellard
    tb_flush_count++;
772 fd6ce8f6 bellard
}
773 fd6ce8f6 bellard
774 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
775 fd6ce8f6 bellard
776 bc98a7ef j_mayer
static void tb_invalidate_check(target_ulong address)
777 fd6ce8f6 bellard
{
778 fd6ce8f6 bellard
    TranslationBlock *tb;
779 fd6ce8f6 bellard
    int i;
780 fd6ce8f6 bellard
    address &= TARGET_PAGE_MASK;
781 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
782 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
783 fd6ce8f6 bellard
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
784 fd6ce8f6 bellard
                  address >= tb->pc + tb->size)) {
785 0bf9e31a Blue Swirl
                printf("ERROR invalidate: address=" TARGET_FMT_lx
786 0bf9e31a Blue Swirl
                       " PC=%08lx size=%04x\n",
787 99773bd4 pbrook
                       address, (long)tb->pc, tb->size);
788 fd6ce8f6 bellard
            }
789 fd6ce8f6 bellard
        }
790 fd6ce8f6 bellard
    }
791 fd6ce8f6 bellard
}
792 fd6ce8f6 bellard
793 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
794 fd6ce8f6 bellard
static void tb_page_check(void)
795 fd6ce8f6 bellard
{
796 fd6ce8f6 bellard
    TranslationBlock *tb;
797 fd6ce8f6 bellard
    int i, flags1, flags2;
798 3b46e624 ths
799 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
800 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
801 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
802 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
803 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
804 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
805 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
806 fd6ce8f6 bellard
            }
807 fd6ce8f6 bellard
        }
808 fd6ce8f6 bellard
    }
809 fd6ce8f6 bellard
}
810 fd6ce8f6 bellard
811 fd6ce8f6 bellard
#endif
812 fd6ce8f6 bellard
813 fd6ce8f6 bellard
/* invalidate one TB */
814 fd6ce8f6 bellard
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
815 fd6ce8f6 bellard
                             int next_offset)
816 fd6ce8f6 bellard
{
817 fd6ce8f6 bellard
    TranslationBlock *tb1;
818 fd6ce8f6 bellard
    for(;;) {
819 fd6ce8f6 bellard
        tb1 = *ptb;
820 fd6ce8f6 bellard
        if (tb1 == tb) {
821 fd6ce8f6 bellard
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
822 fd6ce8f6 bellard
            break;
823 fd6ce8f6 bellard
        }
824 fd6ce8f6 bellard
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
825 fd6ce8f6 bellard
    }
826 fd6ce8f6 bellard
}
827 fd6ce8f6 bellard
828 9fa3e853 bellard
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
829 9fa3e853 bellard
{
830 9fa3e853 bellard
    TranslationBlock *tb1;
831 9fa3e853 bellard
    unsigned int n1;
832 9fa3e853 bellard
833 9fa3e853 bellard
    for(;;) {
834 9fa3e853 bellard
        tb1 = *ptb;
835 9fa3e853 bellard
        n1 = (long)tb1 & 3;
836 9fa3e853 bellard
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
837 9fa3e853 bellard
        if (tb1 == tb) {
838 9fa3e853 bellard
            *ptb = tb1->page_next[n1];
839 9fa3e853 bellard
            break;
840 9fa3e853 bellard
        }
841 9fa3e853 bellard
        ptb = &tb1->page_next[n1];
842 9fa3e853 bellard
    }
843 9fa3e853 bellard
}
844 9fa3e853 bellard
845 d4e8164f bellard
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
846 d4e8164f bellard
{
847 d4e8164f bellard
    TranslationBlock *tb1, **ptb;
848 d4e8164f bellard
    unsigned int n1;
849 d4e8164f bellard
850 d4e8164f bellard
    ptb = &tb->jmp_next[n];
851 d4e8164f bellard
    tb1 = *ptb;
852 d4e8164f bellard
    if (tb1) {
853 d4e8164f bellard
        /* find tb(n) in circular list */
854 d4e8164f bellard
        for(;;) {
855 d4e8164f bellard
            tb1 = *ptb;
856 d4e8164f bellard
            n1 = (long)tb1 & 3;
857 d4e8164f bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
858 d4e8164f bellard
            if (n1 == n && tb1 == tb)
859 d4e8164f bellard
                break;
860 d4e8164f bellard
            if (n1 == 2) {
861 d4e8164f bellard
                ptb = &tb1->jmp_first;
862 d4e8164f bellard
            } else {
863 d4e8164f bellard
                ptb = &tb1->jmp_next[n1];
864 d4e8164f bellard
            }
865 d4e8164f bellard
        }
866 d4e8164f bellard
        /* now we can suppress tb(n) from the list */
867 d4e8164f bellard
        *ptb = tb->jmp_next[n];
868 d4e8164f bellard
869 d4e8164f bellard
        tb->jmp_next[n] = NULL;
870 d4e8164f bellard
    }
871 d4e8164f bellard
}
872 d4e8164f bellard
873 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
874 d4e8164f bellard
   another TB */
875 d4e8164f bellard
static inline void tb_reset_jump(TranslationBlock *tb, int n)
876 d4e8164f bellard
{
877 d4e8164f bellard
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
878 d4e8164f bellard
}
879 d4e8164f bellard
880 41c1b1c9 Paul Brook
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
881 fd6ce8f6 bellard
{
882 6a00d601 bellard
    CPUState *env;
883 8a40a180 bellard
    PageDesc *p;
884 d4e8164f bellard
    unsigned int h, n1;
885 41c1b1c9 Paul Brook
    tb_page_addr_t phys_pc;
886 8a40a180 bellard
    TranslationBlock *tb1, *tb2;
887 3b46e624 ths
888 8a40a180 bellard
    /* remove the TB from the hash list */
889 8a40a180 bellard
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
890 8a40a180 bellard
    h = tb_phys_hash_func(phys_pc);
891 5fafdf24 ths
    tb_remove(&tb_phys_hash[h], tb,
892 8a40a180 bellard
              offsetof(TranslationBlock, phys_hash_next));
893 8a40a180 bellard
894 8a40a180 bellard
    /* remove the TB from the page list */
895 8a40a180 bellard
    if (tb->page_addr[0] != page_addr) {
896 8a40a180 bellard
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
897 8a40a180 bellard
        tb_page_remove(&p->first_tb, tb);
898 8a40a180 bellard
        invalidate_page_bitmap(p);
899 8a40a180 bellard
    }
900 8a40a180 bellard
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
901 8a40a180 bellard
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
902 8a40a180 bellard
        tb_page_remove(&p->first_tb, tb);
903 8a40a180 bellard
        invalidate_page_bitmap(p);
904 8a40a180 bellard
    }
905 8a40a180 bellard
906 36bdbe54 bellard
    tb_invalidated_flag = 1;
907 59817ccb bellard
908 fd6ce8f6 bellard
    /* remove the TB from the hash list */
909 8a40a180 bellard
    h = tb_jmp_cache_hash_func(tb->pc);
910 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
911 6a00d601 bellard
        if (env->tb_jmp_cache[h] == tb)
912 6a00d601 bellard
            env->tb_jmp_cache[h] = NULL;
913 6a00d601 bellard
    }
914 d4e8164f bellard
915 d4e8164f bellard
    /* suppress this TB from the two jump lists */
916 d4e8164f bellard
    tb_jmp_remove(tb, 0);
917 d4e8164f bellard
    tb_jmp_remove(tb, 1);
918 d4e8164f bellard
919 d4e8164f bellard
    /* suppress any remaining jumps to this TB */
920 d4e8164f bellard
    tb1 = tb->jmp_first;
921 d4e8164f bellard
    for(;;) {
922 d4e8164f bellard
        n1 = (long)tb1 & 3;
923 d4e8164f bellard
        if (n1 == 2)
924 d4e8164f bellard
            break;
925 d4e8164f bellard
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
926 d4e8164f bellard
        tb2 = tb1->jmp_next[n1];
927 d4e8164f bellard
        tb_reset_jump(tb1, n1);
928 d4e8164f bellard
        tb1->jmp_next[n1] = NULL;
929 d4e8164f bellard
        tb1 = tb2;
930 d4e8164f bellard
    }
931 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
932 9fa3e853 bellard
933 e3db7226 bellard
    tb_phys_invalidate_count++;
934 9fa3e853 bellard
}
935 9fa3e853 bellard
936 9fa3e853 bellard
static inline void set_bits(uint8_t *tab, int start, int len)
937 9fa3e853 bellard
{
938 9fa3e853 bellard
    int end, mask, end1;
939 9fa3e853 bellard
940 9fa3e853 bellard
    end = start + len;
941 9fa3e853 bellard
    tab += start >> 3;
942 9fa3e853 bellard
    mask = 0xff << (start & 7);
943 9fa3e853 bellard
    if ((start & ~7) == (end & ~7)) {
944 9fa3e853 bellard
        if (start < end) {
945 9fa3e853 bellard
            mask &= ~(0xff << (end & 7));
946 9fa3e853 bellard
            *tab |= mask;
947 9fa3e853 bellard
        }
948 9fa3e853 bellard
    } else {
949 9fa3e853 bellard
        *tab++ |= mask;
950 9fa3e853 bellard
        start = (start + 8) & ~7;
951 9fa3e853 bellard
        end1 = end & ~7;
952 9fa3e853 bellard
        while (start < end1) {
953 9fa3e853 bellard
            *tab++ = 0xff;
954 9fa3e853 bellard
            start += 8;
955 9fa3e853 bellard
        }
956 9fa3e853 bellard
        if (start < end) {
957 9fa3e853 bellard
            mask = ~(0xff << (end & 7));
958 9fa3e853 bellard
            *tab |= mask;
959 9fa3e853 bellard
        }
960 9fa3e853 bellard
    }
961 9fa3e853 bellard
}
962 9fa3e853 bellard
963 9fa3e853 bellard
static void build_page_bitmap(PageDesc *p)
964 9fa3e853 bellard
{
965 9fa3e853 bellard
    int n, tb_start, tb_end;
966 9fa3e853 bellard
    TranslationBlock *tb;
967 3b46e624 ths
968 7267c094 Anthony Liguori
    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
969 9fa3e853 bellard
970 9fa3e853 bellard
    tb = p->first_tb;
971 9fa3e853 bellard
    while (tb != NULL) {
972 9fa3e853 bellard
        n = (long)tb & 3;
973 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
974 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
975 9fa3e853 bellard
        if (n == 0) {
976 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
977 9fa3e853 bellard
               it is not a problem */
978 9fa3e853 bellard
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
979 9fa3e853 bellard
            tb_end = tb_start + tb->size;
980 9fa3e853 bellard
            if (tb_end > TARGET_PAGE_SIZE)
981 9fa3e853 bellard
                tb_end = TARGET_PAGE_SIZE;
982 9fa3e853 bellard
        } else {
983 9fa3e853 bellard
            tb_start = 0;
984 9fa3e853 bellard
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
985 9fa3e853 bellard
        }
986 9fa3e853 bellard
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
987 9fa3e853 bellard
        tb = tb->page_next[n];
988 9fa3e853 bellard
    }
989 9fa3e853 bellard
}
990 9fa3e853 bellard
991 2e70f6ef pbrook
TranslationBlock *tb_gen_code(CPUState *env,
992 2e70f6ef pbrook
                              target_ulong pc, target_ulong cs_base,
993 2e70f6ef pbrook
                              int flags, int cflags)
994 d720b93d bellard
{
995 d720b93d bellard
    TranslationBlock *tb;
996 d720b93d bellard
    uint8_t *tc_ptr;
997 41c1b1c9 Paul Brook
    tb_page_addr_t phys_pc, phys_page2;
998 41c1b1c9 Paul Brook
    target_ulong virt_page2;
999 d720b93d bellard
    int code_gen_size;
1000 d720b93d bellard
1001 41c1b1c9 Paul Brook
    phys_pc = get_page_addr_code(env, pc);
1002 c27004ec bellard
    tb = tb_alloc(pc);
1003 d720b93d bellard
    if (!tb) {
1004 d720b93d bellard
        /* flush must be done */
1005 d720b93d bellard
        tb_flush(env);
1006 d720b93d bellard
        /* cannot fail at this point */
1007 c27004ec bellard
        tb = tb_alloc(pc);
1008 2e70f6ef pbrook
        /* Don't forget to invalidate previous TB info.  */
1009 2e70f6ef pbrook
        tb_invalidated_flag = 1;
1010 d720b93d bellard
    }
1011 d720b93d bellard
    tc_ptr = code_gen_ptr;
1012 d720b93d bellard
    tb->tc_ptr = tc_ptr;
1013 d720b93d bellard
    tb->cs_base = cs_base;
1014 d720b93d bellard
    tb->flags = flags;
1015 d720b93d bellard
    tb->cflags = cflags;
1016 d07bde88 blueswir1
    cpu_gen_code(env, tb, &code_gen_size);
1017 d720b93d bellard
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1018 3b46e624 ths
1019 d720b93d bellard
    /* check next page if needed */
1020 c27004ec bellard
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1021 d720b93d bellard
    phys_page2 = -1;
1022 c27004ec bellard
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1023 41c1b1c9 Paul Brook
        phys_page2 = get_page_addr_code(env, virt_page2);
1024 d720b93d bellard
    }
1025 41c1b1c9 Paul Brook
    tb_link_page(tb, phys_pc, phys_page2);
1026 2e70f6ef pbrook
    return tb;
1027 d720b93d bellard
}
1028 3b46e624 ths
1029 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
1030 9fa3e853 bellard
   starting in range [start;end[. NOTE: start and end must refer to
1031 d720b93d bellard
   the same physical page. 'is_cpu_write_access' should be true if called
1032 d720b93d bellard
   from a real cpu write access: the virtual CPU will exit the current
1033 d720b93d bellard
   TB if code is modified inside this TB. */
1034 41c1b1c9 Paul Brook
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1035 d720b93d bellard
                                   int is_cpu_write_access)
1036 d720b93d bellard
{
1037 6b917547 aliguori
    TranslationBlock *tb, *tb_next, *saved_tb;
1038 d720b93d bellard
    CPUState *env = cpu_single_env;
1039 41c1b1c9 Paul Brook
    tb_page_addr_t tb_start, tb_end;
1040 6b917547 aliguori
    PageDesc *p;
1041 6b917547 aliguori
    int n;
1042 6b917547 aliguori
#ifdef TARGET_HAS_PRECISE_SMC
1043 6b917547 aliguori
    int current_tb_not_found = is_cpu_write_access;
1044 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1045 6b917547 aliguori
    int current_tb_modified = 0;
1046 6b917547 aliguori
    target_ulong current_pc = 0;
1047 6b917547 aliguori
    target_ulong current_cs_base = 0;
1048 6b917547 aliguori
    int current_flags = 0;
1049 6b917547 aliguori
#endif /* TARGET_HAS_PRECISE_SMC */
1050 9fa3e853 bellard
1051 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1052 5fafdf24 ths
    if (!p)
1053 9fa3e853 bellard
        return;
1054 5fafdf24 ths
    if (!p->code_bitmap &&
1055 d720b93d bellard
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1056 d720b93d bellard
        is_cpu_write_access) {
1057 9fa3e853 bellard
        /* build code bitmap */
1058 9fa3e853 bellard
        build_page_bitmap(p);
1059 9fa3e853 bellard
    }
1060 9fa3e853 bellard
1061 9fa3e853 bellard
    /* we remove all the TBs in the range [start, end[ */
1062 9fa3e853 bellard
    /* XXX: see if in some cases it could be faster to invalidate all the code */
1063 9fa3e853 bellard
    tb = p->first_tb;
1064 9fa3e853 bellard
    while (tb != NULL) {
1065 9fa3e853 bellard
        n = (long)tb & 3;
1066 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1067 9fa3e853 bellard
        tb_next = tb->page_next[n];
1068 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
1069 9fa3e853 bellard
        if (n == 0) {
1070 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
1071 9fa3e853 bellard
               it is not a problem */
1072 9fa3e853 bellard
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1073 9fa3e853 bellard
            tb_end = tb_start + tb->size;
1074 9fa3e853 bellard
        } else {
1075 9fa3e853 bellard
            tb_start = tb->page_addr[1];
1076 9fa3e853 bellard
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1077 9fa3e853 bellard
        }
1078 9fa3e853 bellard
        if (!(tb_end <= start || tb_start >= end)) {
1079 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1080 d720b93d bellard
            if (current_tb_not_found) {
1081 d720b93d bellard
                current_tb_not_found = 0;
1082 d720b93d bellard
                current_tb = NULL;
1083 2e70f6ef pbrook
                if (env->mem_io_pc) {
1084 d720b93d bellard
                    /* now we have a real cpu fault */
1085 2e70f6ef pbrook
                    current_tb = tb_find_pc(env->mem_io_pc);
1086 d720b93d bellard
                }
1087 d720b93d bellard
            }
1088 d720b93d bellard
            if (current_tb == tb &&
1089 2e70f6ef pbrook
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
1090 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1091 d720b93d bellard
                its execution. We could be more precise by checking
1092 d720b93d bellard
                that the modification is after the current PC, but it
1093 d720b93d bellard
                would require a specialized function to partially
1094 d720b93d bellard
                restore the CPU state */
1095 3b46e624 ths
1096 d720b93d bellard
                current_tb_modified = 1;
1097 618ba8e6 Stefan Weil
                cpu_restore_state(current_tb, env, env->mem_io_pc);
1098 6b917547 aliguori
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1099 6b917547 aliguori
                                     &current_flags);
1100 d720b93d bellard
            }
1101 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1102 6f5a9f7e bellard
            /* save and clear env->current_tb to handle the case where a signal
1103 6f5a9f7e bellard
               occurs while doing tb_phys_invalidate() */
1104 6f5a9f7e bellard
            saved_tb = NULL;
1105 6f5a9f7e bellard
            if (env) {
1106 6f5a9f7e bellard
                saved_tb = env->current_tb;
1107 6f5a9f7e bellard
                env->current_tb = NULL;
1108 6f5a9f7e bellard
            }
1109 9fa3e853 bellard
            tb_phys_invalidate(tb, -1);
1110 6f5a9f7e bellard
            if (env) {
1111 6f5a9f7e bellard
                env->current_tb = saved_tb;
1112 6f5a9f7e bellard
                if (env->interrupt_request && env->current_tb)
1113 6f5a9f7e bellard
                    cpu_interrupt(env, env->interrupt_request);
1114 6f5a9f7e bellard
            }
1115 9fa3e853 bellard
        }
1116 9fa3e853 bellard
        tb = tb_next;
1117 9fa3e853 bellard
    }
1118 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
1119 9fa3e853 bellard
    /* if no code remaining, no need to continue to use slow writes */
1120 9fa3e853 bellard
    if (!p->first_tb) {
1121 9fa3e853 bellard
        invalidate_page_bitmap(p);
1122 d720b93d bellard
        if (is_cpu_write_access) {
1123 2e70f6ef pbrook
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1124 d720b93d bellard
        }
1125 d720b93d bellard
    }
1126 d720b93d bellard
#endif
1127 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1128 d720b93d bellard
    if (current_tb_modified) {
1129 d720b93d bellard
        /* we generate a block containing just the instruction
1130 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1131 d720b93d bellard
           itself */
1132 ea1c1802 bellard
        env->current_tb = NULL;
1133 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1134 d720b93d bellard
        cpu_resume_from_signal(env, NULL);
1135 9fa3e853 bellard
    }
1136 fd6ce8f6 bellard
#endif
1137 9fa3e853 bellard
}
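/* Illustrative sketch (not part of the original file): since start and
 * end must refer to the same physical page, a caller holding an
 * arbitrary [start, end) range would split it on TARGET_PAGE_SIZE
 * boundaries before invoking tb_invalidate_phys_page_range(). The
 * helper name is hypothetical. */
#if 0
static void tb_invalidate_phys_range_sketch(tb_page_addr_t start,
                                            tb_page_addr_t end,
                                            int is_cpu_write_access)
{
    while (start < end) {
        /* end of the page containing 'start' */
        tb_page_addr_t page_end = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

        if (page_end > end) {
            page_end = end;
        }
        tb_invalidate_phys_page_range(start, page_end, is_cpu_write_access);
        start = page_end;
    }
}
#endif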
1138 fd6ce8f6 bellard
1139 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1140 41c1b1c9 Paul Brook
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1141 9fa3e853 bellard
{
1142 9fa3e853 bellard
    PageDesc *p;
1143 9fa3e853 bellard
    int offset, b;
1144 59817ccb bellard
#if 0
1145 a4193c8a bellard
    if (1) {
1146 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1147 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1148 93fcfe39 aliguori
                  cpu_single_env->eip,
1149 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1150 59817ccb bellard
    }
1151 59817ccb bellard
#endif
1152 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1153 5fafdf24 ths
    if (!p)
1154 9fa3e853 bellard
        return;
1155 9fa3e853 bellard
    if (p->code_bitmap) {
1156 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1157 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1158 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1159 9fa3e853 bellard
            goto do_invalidate;
1160 9fa3e853 bellard
    } else {
1161 9fa3e853 bellard
    do_invalidate:
1162 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1163 9fa3e853 bellard
    }
1164 9fa3e853 bellard
}
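/* Hypothetical caller sketch: the contract above fits naturally aligned
 * power-of-two guest stores; anything else would fall back to the
 * generic range invalidation. */
#if 0
static void code_write_sketch(tb_page_addr_t ram_addr, int size)
{
    /* the mask test assumes 'size' is a power of two, as for CPU stores */
    if (size <= 8 && (ram_addr & (size - 1)) == 0) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    } else {
        tb_invalidate_phys_page_range(ram_addr, ram_addr + size, 1);
    }
}
#endif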
1165 9fa3e853 bellard
1166 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1167 41c1b1c9 Paul Brook
static void tb_invalidate_phys_page(tb_page_addr_t addr,
1168 d720b93d bellard
                                    unsigned long pc, void *puc)
1169 9fa3e853 bellard
{
1170 6b917547 aliguori
    TranslationBlock *tb;
1171 9fa3e853 bellard
    PageDesc *p;
1172 6b917547 aliguori
    int n;
1173 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1174 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1175 d720b93d bellard
    CPUState *env = cpu_single_env;
1176 6b917547 aliguori
    int current_tb_modified = 0;
1177 6b917547 aliguori
    target_ulong current_pc = 0;
1178 6b917547 aliguori
    target_ulong current_cs_base = 0;
1179 6b917547 aliguori
    int current_flags = 0;
1180 d720b93d bellard
#endif
1181 9fa3e853 bellard
1182 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1183 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1184 5fafdf24 ths
    if (!p)
1185 9fa3e853 bellard
        return;
1186 9fa3e853 bellard
    tb = p->first_tb;
1187 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1188 d720b93d bellard
    if (tb && pc != 0) {
1189 d720b93d bellard
        current_tb = tb_find_pc(pc);
1190 d720b93d bellard
    }
1191 d720b93d bellard
#endif
1192 9fa3e853 bellard
    while (tb != NULL) {
1193 9fa3e853 bellard
        n = (long)tb & 3;
1194 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1195 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1196 d720b93d bellard
        if (current_tb == tb &&
1197 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1198 d720b93d bellard
            /* If we are modifying the current TB, we must stop
1199 d720b93d bellard
               its execution. We could be more precise by checking
1200 d720b93d bellard
               that the modification is after the current PC, but it
1201 d720b93d bellard
               would require a specialized function to partially
1202 d720b93d bellard
               restore the CPU state */
1203 3b46e624 ths
1204 d720b93d bellard
            current_tb_modified = 1;
1205 618ba8e6 Stefan Weil
            cpu_restore_state(current_tb, env, pc);
1206 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1207 6b917547 aliguori
                                 &current_flags);
1208 d720b93d bellard
        }
1209 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1210 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1211 9fa3e853 bellard
        tb = tb->page_next[n];
1212 9fa3e853 bellard
    }
1213 fd6ce8f6 bellard
    p->first_tb = NULL;
1214 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1215 d720b93d bellard
    if (current_tb_modified) {
1216 d720b93d bellard
        /* we generate a block containing just the instruction
1217 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1218 d720b93d bellard
           itself */
1219 ea1c1802 bellard
        env->current_tb = NULL;
1220 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1221 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1222 d720b93d bellard
    }
1223 d720b93d bellard
#endif
1224 fd6ce8f6 bellard
}
1225 9fa3e853 bellard
#endif
1226 fd6ce8f6 bellard
1227 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
1228 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1229 41c1b1c9 Paul Brook
                                 unsigned int n, tb_page_addr_t page_addr)
1230 fd6ce8f6 bellard
{
1231 fd6ce8f6 bellard
    PageDesc *p;
1232 4429ab44 Juan Quintela
#ifndef CONFIG_USER_ONLY
1233 4429ab44 Juan Quintela
    bool page_already_protected;
1234 4429ab44 Juan Quintela
#endif
1235 9fa3e853 bellard
1236 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1237 5cd2c5b6 Richard Henderson
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1238 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1239 4429ab44 Juan Quintela
#ifndef CONFIG_USER_ONLY
1240 4429ab44 Juan Quintela
    page_already_protected = p->first_tb != NULL;
1241 4429ab44 Juan Quintela
#endif
1242 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1243 9fa3e853 bellard
    invalidate_page_bitmap(p);
1244 fd6ce8f6 bellard
1245 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1246 d720b93d bellard
1247 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1248 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1249 53a5960a pbrook
        target_ulong addr;
1250 53a5960a pbrook
        PageDesc *p2;
1251 9fa3e853 bellard
        int prot;
1252 9fa3e853 bellard
1253 fd6ce8f6 bellard
        /* force the host page as non writable (writes will have a
1254 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1255 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1256 fd6ce8f6 bellard
        prot = 0;
1257 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1258 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1259 53a5960a pbrook
1260 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1261 53a5960a pbrook
            if (!p2)
1262 53a5960a pbrook
                continue;
1263 53a5960a pbrook
            prot |= p2->flags;
1264 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1265 53a5960a pbrook
        }
1266 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1267 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1268 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1269 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1270 53a5960a pbrook
               page_addr);
1271 fd6ce8f6 bellard
#endif
1272 fd6ce8f6 bellard
    }
1273 9fa3e853 bellard
#else
1274 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1275 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1276 9fa3e853 bellard
       allocated in a physical page */
1277 4429ab44 Juan Quintela
    if (!page_already_protected) {
1278 6a00d601 bellard
        tlb_protect_code(page_addr);
1279 9fa3e853 bellard
    }
1280 9fa3e853 bellard
#endif
1281 d720b93d bellard
1282 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1283 fd6ce8f6 bellard
}
1284 fd6ce8f6 bellard
1285 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1286 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1287 41c1b1c9 Paul Brook
void tb_link_page(TranslationBlock *tb,
1288 41c1b1c9 Paul Brook
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1289 d4e8164f bellard
{
1290 9fa3e853 bellard
    unsigned int h;
1291 9fa3e853 bellard
    TranslationBlock **ptb;
1292 9fa3e853 bellard
1293 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1294 c8a706fe pbrook
       before we are done.  */
1295 c8a706fe pbrook
    mmap_lock();
1296 9fa3e853 bellard
    /* add in the physical hash table */
1297 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1298 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1299 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1300 9fa3e853 bellard
    *ptb = tb;
1301 fd6ce8f6 bellard
1302 fd6ce8f6 bellard
    /* add in the page list */
1303 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1304 9fa3e853 bellard
    if (phys_page2 != -1)
1305 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1306 9fa3e853 bellard
    else
1307 9fa3e853 bellard
        tb->page_addr[1] = -1;
1308 9fa3e853 bellard
1309 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1310 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1311 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1312 d4e8164f bellard
1313 d4e8164f bellard
    /* init original jump addresses */
1314 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1315 d4e8164f bellard
        tb_reset_jump(tb, 0);
1316 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1317 d4e8164f bellard
        tb_reset_jump(tb, 1);
1318 8a40a180 bellard
1319 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1320 8a40a180 bellard
    tb_page_check();
1321 8a40a180 bellard
#endif
1322 c8a706fe pbrook
    mmap_unlock();
1323 fd6ce8f6 bellard
}
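/* Illustrative sketch (hypothetical helper, assuming the TB's guest
 * pages are physically contiguous): deriving phys_page2 for a TB that
 * crosses a page boundary before handing it to tb_link_page(). */
#if 0
static void link_tb_sketch(TranslationBlock *tb, tb_page_addr_t phys_pc)
{
    tb_page_addr_t phys_page2 = -1;

    if (((phys_pc + tb->size - 1) & TARGET_PAGE_MASK) !=
        (phys_pc & TARGET_PAGE_MASK)) {
        /* the translated code spills into a second physical page */
        phys_page2 = (phys_pc + tb->size - 1) & TARGET_PAGE_MASK;
    }
    tb_link_page(tb, phys_pc, phys_page2);
}
#endif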
1324 fd6ce8f6 bellard
1325 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1326 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1327 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1328 fd6ce8f6 bellard
{
1329 9fa3e853 bellard
    int m_min, m_max, m;
1330 9fa3e853 bellard
    unsigned long v;
1331 9fa3e853 bellard
    TranslationBlock *tb;
1332 a513fe19 bellard
1333 a513fe19 bellard
    if (nb_tbs <= 0)
1334 a513fe19 bellard
        return NULL;
1335 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1336 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1337 a513fe19 bellard
        return NULL;
1338 a513fe19 bellard
    /* binary search (cf Knuth) */
1339 a513fe19 bellard
    m_min = 0;
1340 a513fe19 bellard
    m_max = nb_tbs - 1;
1341 a513fe19 bellard
    while (m_min <= m_max) {
1342 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1343 a513fe19 bellard
        tb = &tbs[m];
1344 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1345 a513fe19 bellard
        if (v == tc_ptr)
1346 a513fe19 bellard
            return tb;
1347 a513fe19 bellard
        else if (tc_ptr < v) {
1348 a513fe19 bellard
            m_max = m - 1;
1349 a513fe19 bellard
        } else {
1350 a513fe19 bellard
            m_min = m + 1;
1351 a513fe19 bellard
        }
1352 5fafdf24 ths
    }
1353 a513fe19 bellard
    return &tbs[m_max];
1354 a513fe19 bellard
}
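/* Illustrative sketch (not in the original file): map a host PC inside
 * the code buffer back to its TB so the guest CPU state can be rebuilt,
 * as the precise-SMC paths above do with env->mem_io_pc. */
#if 0
static void restore_from_host_pc_sketch(CPUState *env, unsigned long retaddr)
{
    TranslationBlock *tb = tb_find_pc(retaddr);

    if (tb) {
        cpu_restore_state(tb, env, retaddr);
    }
}
#endif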
1355 7501267e bellard
1356 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1357 ea041c0e bellard
1358 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1359 ea041c0e bellard
{
1360 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1361 ea041c0e bellard
    unsigned int n1;
1362 ea041c0e bellard
1363 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1364 ea041c0e bellard
    if (tb1 != NULL) {
1365 ea041c0e bellard
        /* find head of list */
1366 ea041c0e bellard
        for(;;) {
1367 ea041c0e bellard
            n1 = (long)tb1 & 3;
1368 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1369 ea041c0e bellard
            if (n1 == 2)
1370 ea041c0e bellard
                break;
1371 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1372 ea041c0e bellard
        }
1373 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1374 ea041c0e bellard
        tb_next = tb1;
1375 ea041c0e bellard
1376 ea041c0e bellard
        /* remove tb from the jmp_first list */
1377 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1378 ea041c0e bellard
        for(;;) {
1379 ea041c0e bellard
            tb1 = *ptb;
1380 ea041c0e bellard
            n1 = (long)tb1 & 3;
1381 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1382 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1383 ea041c0e bellard
                break;
1384 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1385 ea041c0e bellard
        }
1386 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1387 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1388 3b46e624 ths
1389 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1390 ea041c0e bellard
        tb_reset_jump(tb, n);
1391 ea041c0e bellard
1392 0124311e bellard
        /* suppress jumps in the tb we could have jumped to */
1393 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1394 ea041c0e bellard
    }
1395 ea041c0e bellard
}
1396 ea041c0e bellard
1397 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1398 ea041c0e bellard
{
1399 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1400 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1401 ea041c0e bellard
}
1402 ea041c0e bellard
1403 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1404 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1405 94df27fd Paul Brook
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1406 94df27fd Paul Brook
{
1407 94df27fd Paul Brook
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1408 94df27fd Paul Brook
}
1409 94df27fd Paul Brook
#else
1410 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1411 d720b93d bellard
{
1412 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1413 9b3c35e0 j_mayer
    target_ulong pd;
1414 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1415 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
1416 d720b93d bellard
1417 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1418 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1419 f1f6e3b8 Avi Kivity
    pd = p.phys_offset;
1420 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1421 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1422 d720b93d bellard
}
1423 c27004ec bellard
#endif
1424 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1425 d720b93d bellard
1426 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
1427 c527ee8f Paul Brook
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1428 c527ee8f Paul Brook
1429 c527ee8f Paul Brook
{
1430 c527ee8f Paul Brook
}
1431 c527ee8f Paul Brook
1432 c527ee8f Paul Brook
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1433 c527ee8f Paul Brook
                          int flags, CPUWatchpoint **watchpoint)
1434 c527ee8f Paul Brook
{
1435 c527ee8f Paul Brook
    return -ENOSYS;
1436 c527ee8f Paul Brook
}
1437 c527ee8f Paul Brook
#else
1438 6658ffb8 pbrook
/* Add a watchpoint.  */
1439 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1440 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1441 6658ffb8 pbrook
{
1442 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1443 c0ce998e aliguori
    CPUWatchpoint *wp;
1444 6658ffb8 pbrook
1445 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1446 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1447 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1448 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1449 b4051334 aliguori
        return -EINVAL;
1450 b4051334 aliguori
    }
1451 7267c094 Anthony Liguori
    wp = g_malloc(sizeof(*wp));
1452 a1d1bb31 aliguori
1453 a1d1bb31 aliguori
    wp->vaddr = addr;
1454 b4051334 aliguori
    wp->len_mask = len_mask;
1455 a1d1bb31 aliguori
    wp->flags = flags;
1456 a1d1bb31 aliguori
1457 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1458 c0ce998e aliguori
    if (flags & BP_GDB)
1459 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1460 c0ce998e aliguori
    else
1461 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1462 6658ffb8 pbrook
1463 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1464 a1d1bb31 aliguori
1465 a1d1bb31 aliguori
    if (watchpoint)
1466 a1d1bb31 aliguori
        *watchpoint = wp;
1467 a1d1bb31 aliguori
    return 0;
1468 6658ffb8 pbrook
}
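/* Hypothetical usage sketch: install a 4-byte write watchpoint and drop
 * it again through the reference returned by the insert call.
 * BP_MEM_WRITE is assumed to be the write-access counterpart of the
 * BP_MEM_READ flag used later in this file. */
#if 0
static void watchpoint_usage_sketch(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp) == 0) {
        /* ... run the guest, handle the watchpoint hit ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif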
1469 6658ffb8 pbrook
1470 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1471 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1472 a1d1bb31 aliguori
                          int flags)
1473 6658ffb8 pbrook
{
1474 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1475 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1476 6658ffb8 pbrook
1477 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1478 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1479 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1480 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1481 6658ffb8 pbrook
            return 0;
1482 6658ffb8 pbrook
        }
1483 6658ffb8 pbrook
    }
1484 a1d1bb31 aliguori
    return -ENOENT;
1485 6658ffb8 pbrook
}
1486 6658ffb8 pbrook
1487 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1488 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1489 a1d1bb31 aliguori
{
1490 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1491 7d03f82f edgar_igl
1492 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1493 a1d1bb31 aliguori
1494 7267c094 Anthony Liguori
    g_free(watchpoint);
1495 a1d1bb31 aliguori
}
1496 a1d1bb31 aliguori
1497 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1498 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1499 a1d1bb31 aliguori
{
1500 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1501 a1d1bb31 aliguori
1502 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1503 a1d1bb31 aliguori
        if (wp->flags & mask)
1504 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1505 c0ce998e aliguori
    }
1506 7d03f82f edgar_igl
}
1507 c527ee8f Paul Brook
#endif
1508 7d03f82f edgar_igl
1509 a1d1bb31 aliguori
/* Add a breakpoint.  */
1510 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1511 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1512 4c3a88a2 bellard
{
1513 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1514 c0ce998e aliguori
    CPUBreakpoint *bp;
1515 3b46e624 ths
1516 7267c094 Anthony Liguori
    bp = g_malloc(sizeof(*bp));
1517 4c3a88a2 bellard
1518 a1d1bb31 aliguori
    bp->pc = pc;
1519 a1d1bb31 aliguori
    bp->flags = flags;
1520 a1d1bb31 aliguori
1521 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1522 c0ce998e aliguori
    if (flags & BP_GDB)
1523 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1524 c0ce998e aliguori
    else
1525 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1526 3b46e624 ths
1527 d720b93d bellard
    breakpoint_invalidate(env, pc);
1528 a1d1bb31 aliguori
1529 a1d1bb31 aliguori
    if (breakpoint)
1530 a1d1bb31 aliguori
        *breakpoint = bp;
1531 4c3a88a2 bellard
    return 0;
1532 4c3a88a2 bellard
#else
1533 a1d1bb31 aliguori
    return -ENOSYS;
1534 4c3a88a2 bellard
#endif
1535 4c3a88a2 bellard
}
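/* Hypothetical usage sketch: plant a GDB-style breakpoint at 'pc' and
 * later clear every GDB-injected breakpoint in one call. */
#if 0
static void breakpoint_usage_sketch(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... resume the guest until it raises EXCP_DEBUG ... */
        cpu_breakpoint_remove_all(env, BP_GDB);
    }
}
#endif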
1536 4c3a88a2 bellard
1537 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1538 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1539 a1d1bb31 aliguori
{
1540 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1541 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1542 a1d1bb31 aliguori
1543 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1544 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1545 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1546 a1d1bb31 aliguori
            return 0;
1547 a1d1bb31 aliguori
        }
1548 7d03f82f edgar_igl
    }
1549 a1d1bb31 aliguori
    return -ENOENT;
1550 a1d1bb31 aliguori
#else
1551 a1d1bb31 aliguori
    return -ENOSYS;
1552 7d03f82f edgar_igl
#endif
1553 7d03f82f edgar_igl
}
1554 7d03f82f edgar_igl
1555 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1556 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1557 4c3a88a2 bellard
{
1558 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1559 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1560 d720b93d bellard
1561 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1562 a1d1bb31 aliguori
1563 7267c094 Anthony Liguori
    g_free(breakpoint);
1564 a1d1bb31 aliguori
#endif
1565 a1d1bb31 aliguori
}
1566 a1d1bb31 aliguori
1567 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1568 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1569 a1d1bb31 aliguori
{
1570 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1571 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1572 a1d1bb31 aliguori
1573 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1574 a1d1bb31 aliguori
        if (bp->flags & mask)
1575 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1576 c0ce998e aliguori
    }
1577 4c3a88a2 bellard
#endif
1578 4c3a88a2 bellard
}
1579 4c3a88a2 bellard
1580 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1581 c33a346e bellard
   CPU loop after each instruction */
1582 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1583 c33a346e bellard
{
1584 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1585 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1586 c33a346e bellard
        env->singlestep_enabled = enabled;
1587 e22a25c9 aliguori
        if (kvm_enabled())
1588 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1589 e22a25c9 aliguori
        else {
1590 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1591 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1592 e22a25c9 aliguori
            tb_flush(env);
1593 e22a25c9 aliguori
        }
1594 c33a346e bellard
    }
1595 c33a346e bellard
#endif
1596 c33a346e bellard
}
1597 c33a346e bellard
1598 34865134 bellard
/* enable or disable low-level logging */
1599 34865134 bellard
void cpu_set_log(int log_flags)
1600 34865134 bellard
{
1601 34865134 bellard
    loglevel = log_flags;
1602 34865134 bellard
    if (loglevel && !logfile) {
1603 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1604 34865134 bellard
        if (!logfile) {
1605 34865134 bellard
            perror(logfilename);
1606 34865134 bellard
            _exit(1);
1607 34865134 bellard
        }
1608 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1609 9fa3e853 bellard
        /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1610 9fa3e853 bellard
        {
1611 b55266b5 blueswir1
            static char logfile_buf[4096];
1612 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1613 9fa3e853 bellard
        }
1614 daf767b1 Stefan Weil
#elif defined(_WIN32)
1615 daf767b1 Stefan Weil
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
1616 daf767b1 Stefan Weil
        setvbuf(logfile, NULL, _IONBF, 0);
1617 daf767b1 Stefan Weil
#else
1618 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1619 9fa3e853 bellard
#endif
1620 e735b91c pbrook
        log_append = 1;
1621 e735b91c pbrook
    }
1622 e735b91c pbrook
    if (!loglevel && logfile) {
1623 e735b91c pbrook
        fclose(logfile);
1624 e735b91c pbrook
        logfile = NULL;
1625 34865134 bellard
    }
1626 34865134 bellard
}
1627 34865134 bellard
1628 34865134 bellard
void cpu_set_log_filename(const char *filename)
1629 34865134 bellard
{
1630 34865134 bellard
    logfilename = strdup(filename);
1631 e735b91c pbrook
    if (logfile) {
1632 e735b91c pbrook
        fclose(logfile);
1633 e735b91c pbrook
        logfile = NULL;
1634 e735b91c pbrook
    }
1635 e735b91c pbrook
    cpu_set_log(loglevel);
1636 34865134 bellard
}
1637 c33a346e bellard
1638 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1639 ea041c0e bellard
{
1640 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1641 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1642 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds, since
1643 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
1644 ea041c0e bellard
    TranslationBlock *tb;
1645 c227f099 Anthony Liguori
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1646 59817ccb bellard
1647 cab1b4bd Riku Voipio
    spin_lock(&interrupt_lock);
1648 3098dba0 aurel32
    tb = env->current_tb;
1649 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1650 3098dba0 aurel32
       all the potentially executing TB */
1651 f76cfe56 Riku Voipio
    if (tb) {
1652 3098dba0 aurel32
        env->current_tb = NULL;
1653 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1654 be214e6c aurel32
    }
1655 cab1b4bd Riku Voipio
    spin_unlock(&interrupt_lock);
1656 3098dba0 aurel32
}
1657 3098dba0 aurel32
1658 97ffbd8d Jan Kiszka
#ifndef CONFIG_USER_ONLY
1659 3098dba0 aurel32
/* mask must never be zero, except for the A20 change call */
1660 ec6959d0 Jan Kiszka
static void tcg_handle_interrupt(CPUState *env, int mask)
1661 3098dba0 aurel32
{
1662 3098dba0 aurel32
    int old_mask;
1663 be214e6c aurel32
1664 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1665 68a79315 bellard
    env->interrupt_request |= mask;
1666 3098dba0 aurel32
1667 8edac960 aliguori
    /*
1668 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1669 8edac960 aliguori
     * case it's halted.
1670 8edac960 aliguori
     */
1671 b7680cb6 Jan Kiszka
    if (!qemu_cpu_is_self(env)) {
1672 8edac960 aliguori
        qemu_cpu_kick(env);
1673 8edac960 aliguori
        return;
1674 8edac960 aliguori
    }
1675 8edac960 aliguori
1676 2e70f6ef pbrook
    if (use_icount) {
1677 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1678 2e70f6ef pbrook
        if (!can_do_io(env)
1679 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1680 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1681 2e70f6ef pbrook
        }
1682 2e70f6ef pbrook
    } else {
1683 3098dba0 aurel32
        cpu_unlink_tb(env);
1684 ea041c0e bellard
    }
1685 ea041c0e bellard
}
1686 ea041c0e bellard
1687 ec6959d0 Jan Kiszka
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1688 ec6959d0 Jan Kiszka
1689 97ffbd8d Jan Kiszka
#else /* CONFIG_USER_ONLY */
1690 97ffbd8d Jan Kiszka
1691 97ffbd8d Jan Kiszka
void cpu_interrupt(CPUState *env, int mask)
1692 97ffbd8d Jan Kiszka
{
1693 97ffbd8d Jan Kiszka
    env->interrupt_request |= mask;
1694 97ffbd8d Jan Kiszka
    cpu_unlink_tb(env);
1695 97ffbd8d Jan Kiszka
}
1696 97ffbd8d Jan Kiszka
#endif /* CONFIG_USER_ONLY */
1697 97ffbd8d Jan Kiszka
1698 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1699 b54ad049 bellard
{
1700 b54ad049 bellard
    env->interrupt_request &= ~mask;
1701 b54ad049 bellard
}
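/* Illustrative sketch: how a device model would drive a level-triggered
 * interrupt line through the request mask; CPU_INTERRUPT_HARD is the
 * conventional external-interrupt bit. */
#if 0
static void irq_line_sketch(CPUState *env, int level)
{
    if (level) {
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
    }
}
#endif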
1702 b54ad049 bellard
1703 3098dba0 aurel32
void cpu_exit(CPUState *env)
1704 3098dba0 aurel32
{
1705 3098dba0 aurel32
    env->exit_request = 1;
1706 3098dba0 aurel32
    cpu_unlink_tb(env);
1707 3098dba0 aurel32
}
1708 3098dba0 aurel32
1709 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1710 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1711 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1712 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1713 f193c797 bellard
      "show target assembly code for each compiled TB" },
1714 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1715 57fec1fe bellard
      "show micro ops for each compiled TB" },
1716 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1717 e01a1157 blueswir1
      "show micro ops "
1718 e01a1157 blueswir1
#ifdef TARGET_I386
1719 e01a1157 blueswir1
      "before eflags optimization and "
1720 f193c797 bellard
#endif
1721 e01a1157 blueswir1
      "after liveness analysis" },
1722 f193c797 bellard
    { CPU_LOG_INT, "int",
1723 f193c797 bellard
      "show interrupts/exceptions in short format" },
1724 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1725 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1726 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1727 e91c8a77 ths
      "show CPU state before block translation" },
1728 f193c797 bellard
#ifdef TARGET_I386
1729 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1730 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1731 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1732 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1733 f193c797 bellard
#endif
1734 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1735 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1736 fd872598 bellard
      "show all i/o ports accesses" },
1737 8e3a9fd2 bellard
#endif
1738 f193c797 bellard
    { 0, NULL, NULL },
1739 f193c797 bellard
};
1740 f193c797 bellard
1741 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1742 f193c797 bellard
{
1743 f193c797 bellard
    if (strlen(s2) != n)
1744 f193c797 bellard
        return 0;
1745 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1746 f193c797 bellard
}
1747 3b46e624 ths
1748 f193c797 bellard
/* takes a comma-separated list of log masks; returns 0 on error */
1749 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1750 f193c797 bellard
{
1751 c7cd6a37 blueswir1
    const CPULogItem *item;
1752 f193c797 bellard
    int mask;
1753 f193c797 bellard
    const char *p, *p1;
1754 f193c797 bellard
1755 f193c797 bellard
    p = str;
1756 f193c797 bellard
    mask = 0;
1757 f193c797 bellard
    for(;;) {
1758 f193c797 bellard
        p1 = strchr(p, ',');
1759 f193c797 bellard
        if (!p1)
1760 f193c797 bellard
            p1 = p + strlen(p);
1761 9742bf26 Yoshiaki Tamura
        if (cmp1(p, p1 - p, "all")) {
1762 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1763 9742bf26 Yoshiaki Tamura
                mask |= item->mask;
1764 9742bf26 Yoshiaki Tamura
            }
1765 9742bf26 Yoshiaki Tamura
        } else {
1766 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1767 9742bf26 Yoshiaki Tamura
                if (cmp1(p, p1 - p, item->name))
1768 9742bf26 Yoshiaki Tamura
                    goto found;
1769 9742bf26 Yoshiaki Tamura
            }
1770 9742bf26 Yoshiaki Tamura
            return 0;
1771 f193c797 bellard
        }
1772 f193c797 bellard
    found:
1773 f193c797 bellard
        mask |= item->mask;
1774 f193c797 bellard
        if (*p1 != ',')
1775 f193c797 bellard
            break;
1776 f193c797 bellard
        p = p1 + 1;
1777 f193c797 bellard
    }
1778 f193c797 bellard
    return mask;
1779 f193c797 bellard
}
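/* Illustrative sketch of the intended use, e.g. from a "-d" option
 * handler: parse a specification such as "in_asm,int" into a mask and
 * enable those log items. */
#if 0
static void log_option_sketch(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);

    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
    } else {
        cpu_set_log(mask);
    }
}
#endif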
1780 ea041c0e bellard
1781 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1782 7501267e bellard
{
1783 7501267e bellard
    va_list ap;
1784 493ae1f0 pbrook
    va_list ap2;
1785 7501267e bellard
1786 7501267e bellard
    va_start(ap, fmt);
1787 493ae1f0 pbrook
    va_copy(ap2, ap);
1788 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1789 7501267e bellard
    vfprintf(stderr, fmt, ap);
1790 7501267e bellard
    fprintf(stderr, "\n");
1791 7501267e bellard
#ifdef TARGET_I386
1792 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1793 7fe48483 bellard
#else
1794 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1795 7501267e bellard
#endif
1796 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1797 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1798 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1799 93fcfe39 aliguori
        qemu_log("\n");
1800 f9373291 j_mayer
#ifdef TARGET_I386
1801 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1802 f9373291 j_mayer
#else
1803 93fcfe39 aliguori
        log_cpu_state(env, 0);
1804 f9373291 j_mayer
#endif
1805 31b1a7b4 aliguori
        qemu_log_flush();
1806 93fcfe39 aliguori
        qemu_log_close();
1807 924edcae balrog
    }
1808 493ae1f0 pbrook
    va_end(ap2);
1809 f9373291 j_mayer
    va_end(ap);
1810 fd052bf6 Riku Voipio
#if defined(CONFIG_USER_ONLY)
1811 fd052bf6 Riku Voipio
    {
1812 fd052bf6 Riku Voipio
        struct sigaction act;
1813 fd052bf6 Riku Voipio
        sigfillset(&act.sa_mask);
1814 fd052bf6 Riku Voipio
        act.sa_handler = SIG_DFL;
1815 fd052bf6 Riku Voipio
        sigaction(SIGABRT, &act, NULL);
1816 fd052bf6 Riku Voipio
    }
1817 fd052bf6 Riku Voipio
#endif
1818 7501267e bellard
    abort();
1819 7501267e bellard
}
1820 7501267e bellard
1821 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1822 c5be9f08 ths
{
1823 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1824 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1825 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1826 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1827 5a38f081 aliguori
    CPUBreakpoint *bp;
1828 5a38f081 aliguori
    CPUWatchpoint *wp;
1829 5a38f081 aliguori
#endif
1830 5a38f081 aliguori
1831 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1832 5a38f081 aliguori
1833 5a38f081 aliguori
    /* Preserve chaining and index. */
1834 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1835 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1836 5a38f081 aliguori
1837 5a38f081 aliguori
    /* Clone all break/watchpoints.
1838 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1839 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1840 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->breakpoints);
1841 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->watchpoints);
1842 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1843 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1844 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1845 5a38f081 aliguori
    }
1846 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1847 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1848 5a38f081 aliguori
                              wp->flags, NULL);
1849 5a38f081 aliguori
    }
1850 5a38f081 aliguori
#endif
1851 5a38f081 aliguori
1852 c5be9f08 ths
    return new_env;
1853 c5be9f08 ths
}
1854 c5be9f08 ths
1855 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1856 0124311e bellard
1857 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1858 5c751e99 edgar_igl
{
1859 5c751e99 edgar_igl
    unsigned int i;
1860 5c751e99 edgar_igl
1861 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1862 5c751e99 edgar_igl
       overlap the flushed page.  */
1863 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1864 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1865 9742bf26 Yoshiaki Tamura
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1866 5c751e99 edgar_igl
1867 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1868 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1869 9742bf26 Yoshiaki Tamura
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1870 5c751e99 edgar_igl
}
1871 5c751e99 edgar_igl
1872 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1873 08738984 Igor Kovalenko
    .addr_read  = -1,
1874 08738984 Igor Kovalenko
    .addr_write = -1,
1875 08738984 Igor Kovalenko
    .addr_code  = -1,
1876 08738984 Igor Kovalenko
    .addend     = -1,
1877 08738984 Igor Kovalenko
};
1878 08738984 Igor Kovalenko
1879 771124e1 Peter Maydell
/* NOTE:
1880 771124e1 Peter Maydell
 * If flush_global is true (the usual case), flush all tlb entries.
1881 771124e1 Peter Maydell
 * If flush_global is false, flush (at least) all tlb entries not
1882 771124e1 Peter Maydell
 * marked global.
1883 771124e1 Peter Maydell
 *
1884 771124e1 Peter Maydell
 * Since QEMU doesn't currently implement a global/not-global flag
1885 771124e1 Peter Maydell
 * for tlb entries, at the moment tlb_flush() will also flush all
1886 771124e1 Peter Maydell
 * tlb entries in the flush_global == false case. This is OK because
1887 771124e1 Peter Maydell
 * CPU architectures generally permit an implementation to drop
1888 771124e1 Peter Maydell
 * entries from the TLB at any time, so flushing more entries than
1889 771124e1 Peter Maydell
 * required is only an efficiency issue, not a correctness issue.
1890 771124e1 Peter Maydell
 */
1891 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1892 33417e70 bellard
{
1893 33417e70 bellard
    int i;
1894 0124311e bellard
1895 9fa3e853 bellard
#if defined(DEBUG_TLB)
1896 9fa3e853 bellard
    printf("tlb_flush:\n");
1897 9fa3e853 bellard
#endif
1898 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1899 0124311e bellard
       links while we are modifying them */
1900 0124311e bellard
    env->current_tb = NULL;
1901 0124311e bellard
1902 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1903 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1904 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1905 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1906 cfde4bd9 Isaku Yamahata
        }
1907 33417e70 bellard
    }
1908 9fa3e853 bellard
1909 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1910 9fa3e853 bellard
1911 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1912 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1913 e3db7226 bellard
    tlb_flush_count++;
1914 33417e70 bellard
}
1915 33417e70 bellard
1916 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1917 61382a50 bellard
{
1918 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1919 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1920 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1921 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1922 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1923 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1924 08738984 Igor Kovalenko
        *tlb_entry = s_cputlb_empty_entry;
1925 84b7b8e7 bellard
    }
1926 61382a50 bellard
}
1927 61382a50 bellard
1928 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
1929 33417e70 bellard
{
1930 8a40a180 bellard
    int i;
1931 cfde4bd9 Isaku Yamahata
    int mmu_idx;
1932 0124311e bellard
1933 9fa3e853 bellard
#if defined(DEBUG_TLB)
1934 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1935 9fa3e853 bellard
#endif
1936 d4c430a8 Paul Brook
    /* Check if we need to flush due to large pages.  */
1937 d4c430a8 Paul Brook
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1938 d4c430a8 Paul Brook
#if defined(DEBUG_TLB)
1939 d4c430a8 Paul Brook
        printf("tlb_flush_page: forced full flush ("
1940 d4c430a8 Paul Brook
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1941 d4c430a8 Paul Brook
               env->tlb_flush_addr, env->tlb_flush_mask);
1942 d4c430a8 Paul Brook
#endif
1943 d4c430a8 Paul Brook
        tlb_flush(env, 1);
1944 d4c430a8 Paul Brook
        return;
1945 d4c430a8 Paul Brook
    }
1946 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1947 0124311e bellard
       links while we are modifying them */
1948 0124311e bellard
    env->current_tb = NULL;
1949 61382a50 bellard
1950 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
1951 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1952 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1953 cfde4bd9 Isaku Yamahata
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1954 0124311e bellard
1955 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
1956 9fa3e853 bellard
}
1957 9fa3e853 bellard
1958 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1959 9fa3e853 bellard
   can be detected */
1960 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
1961 9fa3e853 bellard
{
1962 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1963 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1964 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1965 9fa3e853 bellard
}
1966 9fa3e853 bellard
1967 9fa3e853 bellard
/* update the TLB so that writes in physical page 'ram_addr' are no longer
1968 3a7d929e bellard
   tested for self-modifying code */
1969 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1970 3a7d929e bellard
                                    target_ulong vaddr)
1971 9fa3e853 bellard
{
1972 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1973 1ccde1cb bellard
}
1974 1ccde1cb bellard
1975 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1976 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
1977 1ccde1cb bellard
{
1978 1ccde1cb bellard
    unsigned long addr;
1979 0e0df1e2 Avi Kivity
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
1980 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1981 1ccde1cb bellard
        if ((addr - start) < length) {
1982 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1983 1ccde1cb bellard
        }
1984 1ccde1cb bellard
    }
1985 1ccde1cb bellard
}
1986 1ccde1cb bellard
1987 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
1988 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1989 0a962c02 bellard
                                     int dirty_flags)
1990 1ccde1cb bellard
{
1991 1ccde1cb bellard
    CPUState *env;
1992 4f2ac237 bellard
    unsigned long length, start1;
1993 f7c11b53 Yoshiaki Tamura
    int i;
1994 1ccde1cb bellard
1995 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
1996 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
1997 1ccde1cb bellard
1998 1ccde1cb bellard
    length = end - start;
1999 1ccde1cb bellard
    if (length == 0)
2000 1ccde1cb bellard
        return;
2001 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2002 f23db169 bellard
2003 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
2004 1ccde1cb bellard
       when accessing the range */
2005 b2e0a138 Michael S. Tsirkin
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
2006 a57d23e4 Stefan Weil
    /* Check that we don't span multiple blocks - this breaks the
2007 5579c7f3 pbrook
       address comparisons below.  */
2008 b2e0a138 Michael S. Tsirkin
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2009 5579c7f3 pbrook
            != (end - 1) - start) {
2010 5579c7f3 pbrook
        abort();
2011 5579c7f3 pbrook
    }
2012 5579c7f3 pbrook
2013 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2014 cfde4bd9 Isaku Yamahata
        int mmu_idx;
2015 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2016 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
2017 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2018 cfde4bd9 Isaku Yamahata
                                      start1, length);
2019 cfde4bd9 Isaku Yamahata
        }
2020 6a00d601 bellard
    }
2021 1ccde1cb bellard
}
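/* Illustrative sketch (assumes cpu_physical_memory_get_dirty() and a
 * VGA_DIRTY_FLAG defined alongside CODE_DIRTY_FLAG): a display refresh
 * loop clearing the dirty bit for one framebuffer page after redrawing
 * it. */
#if 0
static void fb_refresh_sketch(ram_addr_t fb_page)
{
    if (cpu_physical_memory_get_dirty(fb_page, VGA_DIRTY_FLAG)) {
        /* ... redraw this page of the framebuffer ... */
        cpu_physical_memory_reset_dirty(fb_page, fb_page + TARGET_PAGE_SIZE,
                                        VGA_DIRTY_FLAG);
    }
}
#endif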
2022 1ccde1cb bellard
2023 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2024 74576198 aliguori
{
2025 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2026 74576198 aliguori
    in_migration = enable;
2027 f6f3fbca Michael S. Tsirkin
    return ret;
2028 74576198 aliguori
}
2029 74576198 aliguori
2030 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2031 3a7d929e bellard
{
2032 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
2033 5579c7f3 pbrook
    void *p;
2034 3a7d929e bellard
2035 0e0df1e2 Avi Kivity
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
2036 5579c7f3 pbrook
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2037 5579c7f3 pbrook
            + tlb_entry->addend);
2038 e890261f Marcelo Tosatti
        ram_addr = qemu_ram_addr_from_host_nofail(p);
2039 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
2040 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
2041 3a7d929e bellard
        }
2042 3a7d929e bellard
    }
2043 3a7d929e bellard
}
2044 3a7d929e bellard
2045 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2046 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2047 3a7d929e bellard
{
2048 3a7d929e bellard
    int i;
2049 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2050 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2051 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2052 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2053 cfde4bd9 Isaku Yamahata
    }
2054 3a7d929e bellard
}
2055 3a7d929e bellard
2056 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2057 1ccde1cb bellard
{
2058 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2059 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
2060 1ccde1cb bellard
}
2061 1ccde1cb bellard
2062 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2063 0f459d16 pbrook
   so that it is no longer dirty */
2064 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2065 1ccde1cb bellard
{
2066 1ccde1cb bellard
    int i;
2067 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2068 1ccde1cb bellard
2069 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2070 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2071 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2072 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2073 9fa3e853 bellard
}
2074 9fa3e853 bellard
2075 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
2076 d4c430a8 Paul Brook
   large pages and trigger a full TLB flush if these are invalidated.  */
2077 d4c430a8 Paul Brook
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2078 d4c430a8 Paul Brook
                               target_ulong size)
2079 d4c430a8 Paul Brook
{
2080 d4c430a8 Paul Brook
    target_ulong mask = ~(size - 1);
2081 d4c430a8 Paul Brook
2082 d4c430a8 Paul Brook
    if (env->tlb_flush_addr == (target_ulong)-1) {
2083 d4c430a8 Paul Brook
        env->tlb_flush_addr = vaddr & mask;
2084 d4c430a8 Paul Brook
        env->tlb_flush_mask = mask;
2085 d4c430a8 Paul Brook
        return;
2086 d4c430a8 Paul Brook
    }
2087 d4c430a8 Paul Brook
    /* Extend the existing region to include the new page.
2088 d4c430a8 Paul Brook
       This is a compromise between unnecessary flushes and the cost
2089 d4c430a8 Paul Brook
       of maintaining a full variable size TLB.  */
2090 d4c430a8 Paul Brook
    mask &= env->tlb_flush_mask;
2091 d4c430a8 Paul Brook
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2092 d4c430a8 Paul Brook
        mask <<= 1;
2093 d4c430a8 Paul Brook
    }
2094 d4c430a8 Paul Brook
    env->tlb_flush_addr &= mask;
2095 d4c430a8 Paul Brook
    env->tlb_flush_mask = mask;
2096 d4c430a8 Paul Brook
}
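/* Worked example (illustrative, 4 KB target pages): inserting a 2 MB
 * page at 0x00200000 sets tlb_flush_addr = 0x00200000 and
 * tlb_flush_mask = 0xffe00000. A second 2 MB page at 0x00600000
 * differs in bit 22, so the mask is shifted left until that bit is
 * covered, leaving tlb_flush_addr = 0x00000000 and
 * tlb_flush_mask = 0xff800000, a single region spanning both pages. */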
2097 d4c430a8 Paul Brook
2098 1d393fa2 Avi Kivity
static bool is_ram_rom(ram_addr_t pd)
2099 1d393fa2 Avi Kivity
{
2100 1d393fa2 Avi Kivity
    pd &= ~TARGET_PAGE_MASK;
2101 0e0df1e2 Avi Kivity
    return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
2102 1d393fa2 Avi Kivity
}
2103 1d393fa2 Avi Kivity
2104 75c578dc Avi Kivity
static bool is_romd(ram_addr_t pd)
2105 75c578dc Avi Kivity
{
2106 75c578dc Avi Kivity
    MemoryRegion *mr;
2107 75c578dc Avi Kivity
2108 75c578dc Avi Kivity
    pd &= ~TARGET_PAGE_MASK;
2109 11c7ef0c Avi Kivity
    mr = io_mem_region[pd];
2110 75c578dc Avi Kivity
    return mr->rom_device && mr->readable;
2111 75c578dc Avi Kivity
}
2112 75c578dc Avi Kivity
2113 1d393fa2 Avi Kivity
static bool is_ram_rom_romd(ram_addr_t pd)
2114 1d393fa2 Avi Kivity
{
2115 75c578dc Avi Kivity
    return is_ram_rom(pd) || is_romd(pd);
2116 1d393fa2 Avi Kivity
}
2117 1d393fa2 Avi Kivity
2118 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
2119 d4c430a8 Paul Brook
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2120 d4c430a8 Paul Brook
   supplied size is only used by tlb_flush_page.  */
2121 d4c430a8 Paul Brook
void tlb_set_page(CPUState *env, target_ulong vaddr,
2122 d4c430a8 Paul Brook
                  target_phys_addr_t paddr, int prot,
2123 d4c430a8 Paul Brook
                  int mmu_idx, target_ulong size)
2124 9fa3e853 bellard
{
2125 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
2126 4f2ac237 bellard
    unsigned long pd;
2127 9fa3e853 bellard
    unsigned int index;
2128 4f2ac237 bellard
    target_ulong address;
2129 0f459d16 pbrook
    target_ulong code_address;
2130 355b1943 Paul Brook
    unsigned long addend;
2131 84b7b8e7 bellard
    CPUTLBEntry *te;
2132 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2133 c227f099 Anthony Liguori
    target_phys_addr_t iotlb;
2134 9fa3e853 bellard
2135 d4c430a8 Paul Brook
    assert(size >= TARGET_PAGE_SIZE);
2136 d4c430a8 Paul Brook
    if (size != TARGET_PAGE_SIZE) {
2137 d4c430a8 Paul Brook
        tlb_add_large_page(env, vaddr, size);
2138 d4c430a8 Paul Brook
    }
2139 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2140 f1f6e3b8 Avi Kivity
    pd = p.phys_offset;
2141 9fa3e853 bellard
#if defined(DEBUG_TLB)
2142 7fd3f494 Stefan Weil
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2143 7fd3f494 Stefan Weil
           " prot=%x idx=%d pd=0x%08lx\n",
2144 7fd3f494 Stefan Weil
           vaddr, paddr, prot, mmu_idx, pd);
2145 9fa3e853 bellard
#endif
2146 9fa3e853 bellard
2147 0f459d16 pbrook
    address = vaddr;
2148 1d393fa2 Avi Kivity
    if (!is_ram_rom_romd(pd)) {
2149 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2150 0f459d16 pbrook
        address |= TLB_MMIO;
2151 0f459d16 pbrook
    }
2152 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2153 1d393fa2 Avi Kivity
    if (is_ram_rom(pd)) {
2154 0f459d16 pbrook
        /* Normal RAM.  */
2155 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2156 0e0df1e2 Avi Kivity
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2157 0e0df1e2 Avi Kivity
            iotlb |= io_mem_notdirty.ram_addr;
2158 0f459d16 pbrook
        else
2159 0e0df1e2 Avi Kivity
            iotlb |= io_mem_rom.ram_addr;
2160 0f459d16 pbrook
    } else {
2161 ccbb4d44 Stuart Brady
        /* IO handlers are currently passed a physical address.
2162 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2163 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2164 0f459d16 pbrook
           and avoid full address decoding in every device.
2165 0f459d16 pbrook
           We can't use the high bits of pd for this because
2166 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2167 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2168 f1f6e3b8 Avi Kivity
        iotlb += p.region_offset;
2169 0f459d16 pbrook
    }
2170 0f459d16 pbrook
2171 0f459d16 pbrook
    code_address = address;
2172 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2173 0f459d16 pbrook
       watchpoint trap routines.  */
2174 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2175 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2176 bf298f83 Jun Koi
            /* Avoid trapping reads of pages with a write breakpoint. */
2177 bf298f83 Jun Koi
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2178 1ec9b909 Avi Kivity
                iotlb = io_mem_watch.ram_addr + paddr;
2179 bf298f83 Jun Koi
                address |= TLB_MMIO;
2180 bf298f83 Jun Koi
                break;
2181 bf298f83 Jun Koi
            }
2182 6658ffb8 pbrook
        }
2183 0f459d16 pbrook
    }
2184 d79acba4 balrog
2185 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2186 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2187 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2188 0f459d16 pbrook
    te->addend = addend - vaddr;
2189 0f459d16 pbrook
    if (prot & PAGE_READ) {
2190 0f459d16 pbrook
        te->addr_read = address;
2191 0f459d16 pbrook
    } else {
2192 0f459d16 pbrook
        te->addr_read = -1;
2193 0f459d16 pbrook
    }
2194 5c751e99 edgar_igl
2195 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2196 0f459d16 pbrook
        te->addr_code = code_address;
2197 0f459d16 pbrook
    } else {
2198 0f459d16 pbrook
        te->addr_code = -1;
2199 0f459d16 pbrook
    }
2200 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2201 75c578dc Avi Kivity
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
2202 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2203 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2204 0e0df1e2 Avi Kivity
        } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
2205 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2206 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2207 9fa3e853 bellard
        } else {
2208 0f459d16 pbrook
            te->addr_write = address;
2209 9fa3e853 bellard
        }
2210 0f459d16 pbrook
    } else {
2211 0f459d16 pbrook
        te->addr_write = -1;
2212 9fa3e853 bellard
    }
2213 9fa3e853 bellard
}
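
/* Illustrative sketch (hypothetical, not part of the original file): a
   target's tlb_fill() handler would translate the faulting address
   through its MMU tables and then install the result with
   tlb_set_page().  A flat identity mapping is assumed here purely for
   the example. */
static void example_tlb_fill_identity(CPUState *env, target_ulong vaddr,
                                      int mmu_idx)
{
    target_ulong page = vaddr & TARGET_PAGE_MASK;

    tlb_set_page(env, page, (target_phys_addr_t)page,
                 PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                 TARGET_PAGE_SIZE);
}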
2214 9fa3e853 bellard
2215 0124311e bellard
#else
2216 0124311e bellard
2217 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2218 0124311e bellard
{
2219 0124311e bellard
}
2220 0124311e bellard
2221 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2222 0124311e bellard
{
2223 0124311e bellard
}
2224 0124311e bellard
2225 edf8e2af Mika Westerberg
/*
2226 edf8e2af Mika Westerberg
 * Walks guest process memory "regions" one by one
2227 edf8e2af Mika Westerberg
 * and calls callback function 'fn' for each region.
2228 edf8e2af Mika Westerberg
 */
2229 5cd2c5b6 Richard Henderson
2230 5cd2c5b6 Richard Henderson
struct walk_memory_regions_data
2231 5cd2c5b6 Richard Henderson
{
2232 5cd2c5b6 Richard Henderson
    walk_memory_regions_fn fn;
2233 5cd2c5b6 Richard Henderson
    void *priv;
2234 5cd2c5b6 Richard Henderson
    unsigned long start;
2235 5cd2c5b6 Richard Henderson
    int prot;
2236 5cd2c5b6 Richard Henderson
};
2237 5cd2c5b6 Richard Henderson
2238 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2239 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2240 5cd2c5b6 Richard Henderson
{
2241 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2242 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2243 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2244 5cd2c5b6 Richard Henderson
            return rc;
2245 5cd2c5b6 Richard Henderson
        }
2246 5cd2c5b6 Richard Henderson
    }
2247 5cd2c5b6 Richard Henderson
2248 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2249 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2250 5cd2c5b6 Richard Henderson
2251 5cd2c5b6 Richard Henderson
    return 0;
2252 5cd2c5b6 Richard Henderson
}
2253 5cd2c5b6 Richard Henderson
2254 5cd2c5b6 Richard Henderson
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2255 b480d9b7 Paul Brook
                                 abi_ulong base, int level, void **lp)
2256 5cd2c5b6 Richard Henderson
{
2257 b480d9b7 Paul Brook
    abi_ulong pa;
2258 5cd2c5b6 Richard Henderson
    int i, rc;
2259 5cd2c5b6 Richard Henderson
2260 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
2261 5cd2c5b6 Richard Henderson
        return walk_memory_regions_end(data, base, 0);
2262 5cd2c5b6 Richard Henderson
    }
2263 5cd2c5b6 Richard Henderson
2264 5cd2c5b6 Richard Henderson
    if (level == 0) {
2265 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
2266 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2267 5cd2c5b6 Richard Henderson
            int prot = pd[i].flags;
2268 5cd2c5b6 Richard Henderson
2269 5cd2c5b6 Richard Henderson
            pa = base | (i << TARGET_PAGE_BITS);
2270 5cd2c5b6 Richard Henderson
            if (prot != data->prot) {
2271 5cd2c5b6 Richard Henderson
                rc = walk_memory_regions_end(data, pa, prot);
2272 5cd2c5b6 Richard Henderson
                if (rc != 0) {
2273 5cd2c5b6 Richard Henderson
                    return rc;
2274 9fa3e853 bellard
                }
2275 9fa3e853 bellard
            }
2276 5cd2c5b6 Richard Henderson
        }
2277 5cd2c5b6 Richard Henderson
    } else {
2278 5cd2c5b6 Richard Henderson
        void **pp = *lp;
2279 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2280 b480d9b7 Paul Brook
            pa = base | ((abi_ulong)i <<
2281 b480d9b7 Paul Brook
                (TARGET_PAGE_BITS + L2_BITS * level));
2282 5cd2c5b6 Richard Henderson
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2283 5cd2c5b6 Richard Henderson
            if (rc != 0) {
2284 5cd2c5b6 Richard Henderson
                return rc;
2285 5cd2c5b6 Richard Henderson
            }
2286 5cd2c5b6 Richard Henderson
        }
2287 5cd2c5b6 Richard Henderson
    }
2288 5cd2c5b6 Richard Henderson
2289 5cd2c5b6 Richard Henderson
    return 0;
2290 5cd2c5b6 Richard Henderson
}
2291 5cd2c5b6 Richard Henderson
2292 5cd2c5b6 Richard Henderson
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2293 5cd2c5b6 Richard Henderson
{
2294 5cd2c5b6 Richard Henderson
    struct walk_memory_regions_data data;
2295 5cd2c5b6 Richard Henderson
    unsigned long i;
2296 5cd2c5b6 Richard Henderson
2297 5cd2c5b6 Richard Henderson
    data.fn = fn;
2298 5cd2c5b6 Richard Henderson
    data.priv = priv;
2299 5cd2c5b6 Richard Henderson
    data.start = -1ul;
2300 5cd2c5b6 Richard Henderson
    data.prot = 0;
2301 5cd2c5b6 Richard Henderson
2302 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
2303 b480d9b7 Paul Brook
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2304 5cd2c5b6 Richard Henderson
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2305 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2306 5cd2c5b6 Richard Henderson
            return rc;
2307 9fa3e853 bellard
        }
2308 33417e70 bellard
    }
2309 5cd2c5b6 Richard Henderson
2310 5cd2c5b6 Richard Henderson
    return walk_memory_regions_end(&data, 0, 0);
2311 edf8e2af Mika Westerberg
}
2312 edf8e2af Mika Westerberg
2313 b480d9b7 Paul Brook
static int dump_region(void *priv, abi_ulong start,
2314 b480d9b7 Paul Brook
    abi_ulong end, unsigned long prot)
2315 edf8e2af Mika Westerberg
{
2316 edf8e2af Mika Westerberg
    FILE *f = (FILE *)priv;
2317 edf8e2af Mika Westerberg
2318 b480d9b7 Paul Brook
    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2319 b480d9b7 Paul Brook
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
2320 edf8e2af Mika Westerberg
        start, end, end - start,
2321 edf8e2af Mika Westerberg
        ((prot & PAGE_READ) ? 'r' : '-'),
2322 edf8e2af Mika Westerberg
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2323 edf8e2af Mika Westerberg
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2324 edf8e2af Mika Westerberg
2325 edf8e2af Mika Westerberg
    return 0;
2326 edf8e2af Mika Westerberg
}
2327 edf8e2af Mika Westerberg
2328 edf8e2af Mika Westerberg
/* dump memory mappings */
2329 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2330 edf8e2af Mika Westerberg
{
2331 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2332 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2333 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2334 33417e70 bellard
}
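
/* Illustrative sketch (hypothetical, not in the original file): another
   walk_memory_regions() client, summing the bytes currently mapped
   writable.  The callback signature matches dump_region() above. */
static int add_writable_region(void *priv, abi_ulong start,
                               abi_ulong end, unsigned long prot)
{
    if (prot & PAGE_WRITE) {
        *(abi_ulong *)priv += end - start;
    }
    return 0; /* a non-zero return would stop the walk early */
}

static abi_ulong example_count_writable_bytes(void)
{
    abi_ulong total = 0;

    walk_memory_regions(&total, add_writable_region);
    return total;
}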
2335 33417e70 bellard
2336 53a5960a pbrook
int page_get_flags(target_ulong address)
2337 33417e70 bellard
{
2338 9fa3e853 bellard
    PageDesc *p;
2339 9fa3e853 bellard
2340 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2341 33417e70 bellard
    if (!p)
2342 9fa3e853 bellard
        return 0;
2343 9fa3e853 bellard
    return p->flags;
2344 9fa3e853 bellard
}
2345 9fa3e853 bellard
2346 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
2347 376a7909 Richard Henderson
   The flag PAGE_WRITE_ORG is positioned automatically depending
2348 376a7909 Richard Henderson
   on PAGE_WRITE.  The mmap_lock should already be held.  */
2349 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2350 9fa3e853 bellard
{
2351 376a7909 Richard Henderson
    target_ulong addr, len;
2352 376a7909 Richard Henderson
2353 376a7909 Richard Henderson
    /* This function should never be called with addresses outside the
2354 376a7909 Richard Henderson
       guest address space.  If this assert fires, it probably indicates
2355 376a7909 Richard Henderson
       a missing call to h2g_valid.  */
2356 b480d9b7 Paul Brook
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2357 b480d9b7 Paul Brook
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2358 376a7909 Richard Henderson
#endif
2359 376a7909 Richard Henderson
    assert(start < end);
2360 9fa3e853 bellard
2361 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2362 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2363 376a7909 Richard Henderson
2364 376a7909 Richard Henderson
    if (flags & PAGE_WRITE) {
2365 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2366 376a7909 Richard Henderson
    }
2367 376a7909 Richard Henderson
2368 376a7909 Richard Henderson
    for (addr = start, len = end - start;
2369 376a7909 Richard Henderson
         len != 0;
2370 376a7909 Richard Henderson
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2371 376a7909 Richard Henderson
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2372 376a7909 Richard Henderson
2373 376a7909 Richard Henderson
        /* If the write protection bit is set, then we invalidate
2374 376a7909 Richard Henderson
           the code inside.  */
2375 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2376 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2377 9fa3e853 bellard
            p->first_tb) {
2378 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2379 9fa3e853 bellard
        }
2380 9fa3e853 bellard
        p->flags = flags;
2381 9fa3e853 bellard
    }
2382 33417e70 bellard
}
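
/* Illustrative sketch (hypothetical): a target mmap emulation would mark
   a freshly created anonymous mapping roughly like this; start and len
   are assumed to be page-aligned by the caller. */
static void example_mark_anon_mapping(target_ulong start, target_ulong len)
{
    /* PAGE_VALID makes the range visible to page_check_range();
       PAGE_WRITE_ORG is added automatically by page_set_flags(). */
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}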
2383 33417e70 bellard
2384 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2385 3d97b40b ths
{
2386 3d97b40b ths
    PageDesc *p;
2387 3d97b40b ths
    target_ulong end;
2388 3d97b40b ths
    target_ulong addr;
2389 3d97b40b ths
2390 376a7909 Richard Henderson
    /* This function should never be called with addresses outside the
2391 376a7909 Richard Henderson
       guest address space.  If this assert fires, it probably indicates
2392 376a7909 Richard Henderson
       a missing call to h2g_valid.  */
2393 338e9e6c Blue Swirl
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2394 338e9e6c Blue Swirl
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2395 376a7909 Richard Henderson
#endif
2396 376a7909 Richard Henderson
2397 3e0650a9 Richard Henderson
    if (len == 0) {
2398 3e0650a9 Richard Henderson
        return 0;
2399 3e0650a9 Richard Henderson
    }
2400 376a7909 Richard Henderson
    if (start + len - 1 < start) {
2401 376a7909 Richard Henderson
        /* We've wrapped around.  */
2402 55f280c9 balrog
        return -1;
2403 376a7909 Richard Henderson
    }
2404 55f280c9 balrog
2405 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2406 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2407 3d97b40b ths
2408 376a7909 Richard Henderson
    for (addr = start, len = end - start;
2409 376a7909 Richard Henderson
         len != 0;
2410 376a7909 Richard Henderson
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2411 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2412 3d97b40b ths
        if (!p)
2413 3d97b40b ths
            return -1;
2414 3d97b40b ths
        if (!(p->flags & PAGE_VALID))
2415 3d97b40b ths
            return -1;
2416 3d97b40b ths
2417 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2418 3d97b40b ths
            return -1;
2419 dae3270c bellard
        if (flags & PAGE_WRITE) {
2420 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2421 dae3270c bellard
                return -1;
2422 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2423 dae3270c bellard
               contains translated code */
2424 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2425 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2426 dae3270c bellard
                    return -1;
2427 dae3270c bellard
            }
2429 dae3270c bellard
        }
2430 3d97b40b ths
    }
2431 3d97b40b ths
    return 0;
2432 3d97b40b ths
}
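
/* Illustrative sketch (hypothetical): a syscall emulation layer can
   validate a guest buffer with page_check_range() before touching it. */
static int example_validate_buffer(target_ulong guest_addr, target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
        return -1; /* callers typically turn this into -TARGET_EFAULT */
    }
    return 0;
}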
2433 3d97b40b ths
2434 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2435 ccbb4d44 Stuart Brady
   page. Return TRUE if the fault was successfully handled. */
2436 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2437 9fa3e853 bellard
{
2438 45d679d6 Aurelien Jarno
    unsigned int prot;
2439 45d679d6 Aurelien Jarno
    PageDesc *p;
2440 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2441 9fa3e853 bellard
2442 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2443 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2444 c8a706fe pbrook
       practice it seems to be ok.  */
2445 c8a706fe pbrook
    mmap_lock();
2446 c8a706fe pbrook
2447 45d679d6 Aurelien Jarno
    p = page_find(address >> TARGET_PAGE_BITS);
2448 45d679d6 Aurelien Jarno
    if (!p) {
2449 c8a706fe pbrook
        mmap_unlock();
2450 9fa3e853 bellard
        return 0;
2451 c8a706fe pbrook
    }
2452 45d679d6 Aurelien Jarno
2453 9fa3e853 bellard
    /* if the page was really writable, then we change its
2454 9fa3e853 bellard
       protection back to writable */
2455 45d679d6 Aurelien Jarno
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2456 45d679d6 Aurelien Jarno
        host_start = address & qemu_host_page_mask;
2457 45d679d6 Aurelien Jarno
        host_end = host_start + qemu_host_page_size;
2458 45d679d6 Aurelien Jarno
2459 45d679d6 Aurelien Jarno
        prot = 0;
2460 45d679d6 Aurelien Jarno
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2461 45d679d6 Aurelien Jarno
            p = page_find(addr >> TARGET_PAGE_BITS);
2462 45d679d6 Aurelien Jarno
            p->flags |= PAGE_WRITE;
2463 45d679d6 Aurelien Jarno
            prot |= p->flags;
2464 45d679d6 Aurelien Jarno
2465 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2466 9fa3e853 bellard
               the corresponding translated code. */
2467 45d679d6 Aurelien Jarno
            tb_invalidate_phys_page(addr, pc, puc);
2468 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2469 45d679d6 Aurelien Jarno
            tb_invalidate_check(addr);
2470 9fa3e853 bellard
#endif
2471 9fa3e853 bellard
        }
2472 45d679d6 Aurelien Jarno
        mprotect((void *)g2h(host_start), qemu_host_page_size,
2473 45d679d6 Aurelien Jarno
                 prot & PAGE_BITS);
2474 45d679d6 Aurelien Jarno
2475 45d679d6 Aurelien Jarno
        mmap_unlock();
2476 45d679d6 Aurelien Jarno
        return 1;
2477 9fa3e853 bellard
    }
2478 c8a706fe pbrook
    mmap_unlock();
2479 9fa3e853 bellard
    return 0;
2480 9fa3e853 bellard
}
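
/* Illustrative sketch (hypothetical): the host SIGSEGV handler in
   user-only mode hands write faults to page_unprotect(); a non-zero
   return means the fault came from our own write protection, so the
   faulting instruction can simply be restarted. */
static int example_handle_write_fault(unsigned long host_pc,
                                      target_ulong guest_addr)
{
    return page_unprotect(guest_addr, host_pc, NULL);
}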
2481 9fa3e853 bellard
2482 6a00d601 bellard
static inline void tlb_set_dirty(CPUState *env,
2483 6a00d601 bellard
                                 unsigned long addr, target_ulong vaddr)
2484 1ccde1cb bellard
{
2485 1ccde1cb bellard
}
2486 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2487 9fa3e853 bellard
2488 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2489 8da3ff18 pbrook
2490 c04b2b78 Paul Brook
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2491 c04b2b78 Paul Brook
typedef struct subpage_t {
2492 70c68e44 Avi Kivity
    MemoryRegion iomem;
2493 c04b2b78 Paul Brook
    target_phys_addr_t base;
2494 f6405247 Richard Henderson
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2495 f6405247 Richard Henderson
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
2496 c04b2b78 Paul Brook
} subpage_t;
2497 c04b2b78 Paul Brook
2498 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2499 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset);
2500 f6405247 Richard Henderson
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2501 f6405247 Richard Henderson
                                ram_addr_t orig_memory,
2502 f6405247 Richard Henderson
                                ram_addr_t region_offset);
2503 db7b5426 blueswir1
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2504 db7b5426 blueswir1
                      need_subpage)                                     \
2505 db7b5426 blueswir1
    do {                                                                \
2506 db7b5426 blueswir1
        if (addr > start_addr)                                          \
2507 db7b5426 blueswir1
            start_addr2 = 0;                                            \
2508 db7b5426 blueswir1
        else {                                                          \
2509 db7b5426 blueswir1
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2510 db7b5426 blueswir1
            if (start_addr2 > 0)                                        \
2511 db7b5426 blueswir1
                need_subpage = 1;                                       \
2512 db7b5426 blueswir1
        }                                                               \
2513 db7b5426 blueswir1
                                                                        \
2514 49e9fba2 blueswir1
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2515 db7b5426 blueswir1
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2516 db7b5426 blueswir1
        else {                                                          \
2517 db7b5426 blueswir1
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2518 db7b5426 blueswir1
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2519 db7b5426 blueswir1
                need_subpage = 1;                                       \
2520 db7b5426 blueswir1
        }                                                               \
2521 db7b5426 blueswir1
    } while (0)
2522 db7b5426 blueswir1
2523 8f2498f9 Michael S. Tsirkin
/* register physical memory.
2524 8f2498f9 Michael S. Tsirkin
   For RAM, 'size' must be a multiple of the target page size.
2525 8f2498f9 Michael S. Tsirkin
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2526 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2527 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2528 ccbb4d44 Stuart Brady
   start_addr and region_offset are rounded down to a page boundary
2529 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2530 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
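/* Worked example (illustrative): for an MMIO region registered at
   start_addr 0x10000000 with region_offset 0, a guest access at
   0x10000008 reaches the handler with offset
   (0x10000008 & ~TARGET_PAGE_MASK) + region_offset = 0x8. */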
2531 dd81124b Avi Kivity
void cpu_register_physical_memory_log(MemoryRegionSection *section,
2532 dd81124b Avi Kivity
                                      bool readable, bool readonly)
2533 33417e70 bellard
{
2534 dd81124b Avi Kivity
    target_phys_addr_t start_addr = section->offset_within_address_space;
2535 dd81124b Avi Kivity
    ram_addr_t size = section->size;
2536 dd81124b Avi Kivity
    ram_addr_t phys_offset = section->mr->ram_addr;
2537 dd81124b Avi Kivity
    ram_addr_t region_offset = section->offset_within_region;
2538 c227f099 Anthony Liguori
    target_phys_addr_t addr, end_addr;
2539 92e873b9 bellard
    PhysPageDesc *p;
2540 9d42037b bellard
    CPUState *env;
2541 c227f099 Anthony Liguori
    ram_addr_t orig_size = size;
2542 f6405247 Richard Henderson
    subpage_t *subpage;
2543 33417e70 bellard
2544 dd81124b Avi Kivity
    if (memory_region_is_ram(section->mr)) {
2545 dd81124b Avi Kivity
        phys_offset += region_offset;
2546 dd81124b Avi Kivity
        region_offset = 0;
2547 dd81124b Avi Kivity
    }
2548 dd81124b Avi Kivity
2549 dd81124b Avi Kivity
    if (readonly) {
2550 dd81124b Avi Kivity
        phys_offset |= io_mem_rom.ram_addr;
2551 dd81124b Avi Kivity
    }
2552 dd81124b Avi Kivity
2553 3b8e6a2d Edgar E. Iglesias
    assert(size);
2554 f6f3fbca Michael S. Tsirkin
2555 0e0df1e2 Avi Kivity
    if (phys_offset == io_mem_unassigned.ram_addr) {
2556 67c4d23c pbrook
        region_offset = start_addr;
2557 67c4d23c pbrook
    }
2558 8da3ff18 pbrook
    region_offset &= TARGET_PAGE_MASK;
2559 5fd386f6 bellard
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2560 c227f099 Anthony Liguori
    end_addr = start_addr + (target_phys_addr_t)size;
2561 3b8e6a2d Edgar E. Iglesias
2562 3b8e6a2d Edgar E. Iglesias
    addr = start_addr;
2563 3b8e6a2d Edgar E. Iglesias
    do {
2564 f1f6e3b8 Avi Kivity
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
2565 0e0df1e2 Avi Kivity
        if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
2566 c227f099 Anthony Liguori
            ram_addr_t orig_memory = p->phys_offset;
2567 c227f099 Anthony Liguori
            target_phys_addr_t start_addr2, end_addr2;
2568 db7b5426 blueswir1
            int need_subpage = 0;
2569 11c7ef0c Avi Kivity
            MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
2570 db7b5426 blueswir1
2571 db7b5426 blueswir1
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2572 db7b5426 blueswir1
                          need_subpage);
2573 f6405247 Richard Henderson
            if (need_subpage) {
2574 b3b00c78 Avi Kivity
                if (!(mr->subpage)) {
2575 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2576 8da3ff18 pbrook
                                           &p->phys_offset, orig_memory,
2577 8da3ff18 pbrook
                                           p->region_offset);
2578 db7b5426 blueswir1
                } else {
2579 a621f38d Avi Kivity
                    subpage = container_of(mr, subpage_t, iomem);
2580 db7b5426 blueswir1
                }
2581 8da3ff18 pbrook
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2582 8da3ff18 pbrook
                                 region_offset);
2583 8da3ff18 pbrook
                p->region_offset = 0;
2584 db7b5426 blueswir1
            } else {
2585 db7b5426 blueswir1
                p->phys_offset = phys_offset;
2586 2774c6d0 Avi Kivity
                p->region_offset = region_offset;
2587 1d393fa2 Avi Kivity
                if (is_ram_rom_romd(phys_offset))
2588 db7b5426 blueswir1
                    phys_offset += TARGET_PAGE_SIZE;
2589 db7b5426 blueswir1
            }
2590 db7b5426 blueswir1
        } else {
2591 db7b5426 blueswir1
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2592 db7b5426 blueswir1
            p->phys_offset = phys_offset;
2593 8da3ff18 pbrook
            p->region_offset = region_offset;
2594 1d393fa2 Avi Kivity
            if (is_ram_rom_romd(phys_offset)) {
2595 db7b5426 blueswir1
                phys_offset += TARGET_PAGE_SIZE;
2596 0e8f0967 pbrook
            } else {
2597 c227f099 Anthony Liguori
                target_phys_addr_t start_addr2, end_addr2;
2598 db7b5426 blueswir1
                int need_subpage = 0;
2599 db7b5426 blueswir1
2600 db7b5426 blueswir1
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2601 db7b5426 blueswir1
                              end_addr2, need_subpage);
2602 db7b5426 blueswir1
2603 f6405247 Richard Henderson
                if (need_subpage) {
2604 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2605 0e0df1e2 Avi Kivity
                                           &p->phys_offset,
2606 0e0df1e2 Avi Kivity
                                           io_mem_unassigned.ram_addr,
2607 67c4d23c pbrook
                                           addr & TARGET_PAGE_MASK);
2608 db7b5426 blueswir1
                    subpage_register(subpage, start_addr2, end_addr2,
2609 8da3ff18 pbrook
                                     phys_offset, region_offset);
2610 8da3ff18 pbrook
                    p->region_offset = 0;
2611 db7b5426 blueswir1
                }
2612 db7b5426 blueswir1
            }
2613 db7b5426 blueswir1
        }
2614 8da3ff18 pbrook
        region_offset += TARGET_PAGE_SIZE;
2615 3b8e6a2d Edgar E. Iglesias
        addr += TARGET_PAGE_SIZE;
2616 3b8e6a2d Edgar E. Iglesias
    } while (addr != end_addr);
2617 3b46e624 ths
2618 9d42037b bellard
    /* since each CPU stores ram addresses in its TLB cache, we must
2619 9d42037b bellard
       reset the modified entries */
2620 9d42037b bellard
    /* XXX: slow ! */
2621 9d42037b bellard
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
2622 9d42037b bellard
        tlb_flush(env, 1);
2623 9d42037b bellard
    }
2624 33417e70 bellard
}
2625 33417e70 bellard
2626 c227f099 Anthony Liguori
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2627 f65ed4c1 aliguori
{
2628 f65ed4c1 aliguori
    if (kvm_enabled())
2629 f65ed4c1 aliguori
        kvm_coalesce_mmio_region(addr, size);
2630 f65ed4c1 aliguori
}
2631 f65ed4c1 aliguori
2632 c227f099 Anthony Liguori
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2633 f65ed4c1 aliguori
{
2634 f65ed4c1 aliguori
    if (kvm_enabled())
2635 f65ed4c1 aliguori
        kvm_uncoalesce_mmio_region(addr, size);
2636 f65ed4c1 aliguori
}
2637 f65ed4c1 aliguori
2638 62a2744c Sheng Yang
void qemu_flush_coalesced_mmio_buffer(void)
2639 62a2744c Sheng Yang
{
2640 62a2744c Sheng Yang
    if (kvm_enabled())
2641 62a2744c Sheng Yang
        kvm_flush_coalesced_mmio_buffer();
2642 62a2744c Sheng Yang
}
2643 62a2744c Sheng Yang
2644 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2645 c902760f Marcelo Tosatti
2646 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2647 c902760f Marcelo Tosatti
2648 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
2649 c902760f Marcelo Tosatti
2650 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
2651 c902760f Marcelo Tosatti
{
2652 c902760f Marcelo Tosatti
    struct statfs fs;
2653 c902760f Marcelo Tosatti
    int ret;
2654 c902760f Marcelo Tosatti
2655 c902760f Marcelo Tosatti
    do {
2656 9742bf26 Yoshiaki Tamura
        ret = statfs(path, &fs);
2657 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);
2658 c902760f Marcelo Tosatti
2659 c902760f Marcelo Tosatti
    if (ret != 0) {
2660 9742bf26 Yoshiaki Tamura
        perror(path);
2661 9742bf26 Yoshiaki Tamura
        return 0;
2662 c902760f Marcelo Tosatti
    }
2663 c902760f Marcelo Tosatti
2664 c902760f Marcelo Tosatti
    if (fs.f_type != HUGETLBFS_MAGIC)
2665 9742bf26 Yoshiaki Tamura
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2666 c902760f Marcelo Tosatti
2667 c902760f Marcelo Tosatti
    return fs.f_bsize;
2668 c902760f Marcelo Tosatti
}
2669 c902760f Marcelo Tosatti
2670 04b16653 Alex Williamson
static void *file_ram_alloc(RAMBlock *block,
2671 04b16653 Alex Williamson
                            ram_addr_t memory,
2672 04b16653 Alex Williamson
                            const char *path)
2673 c902760f Marcelo Tosatti
{
2674 c902760f Marcelo Tosatti
    char *filename;
2675 c902760f Marcelo Tosatti
    void *area;
2676 c902760f Marcelo Tosatti
    int fd;
2677 c902760f Marcelo Tosatti
#ifdef MAP_POPULATE
2678 c902760f Marcelo Tosatti
    int flags;
2679 c902760f Marcelo Tosatti
#endif
2680 c902760f Marcelo Tosatti
    unsigned long hpagesize;
2681 c902760f Marcelo Tosatti
2682 c902760f Marcelo Tosatti
    hpagesize = gethugepagesize(path);
2683 c902760f Marcelo Tosatti
    if (!hpagesize) {
2684 9742bf26 Yoshiaki Tamura
        return NULL;
2685 c902760f Marcelo Tosatti
    }
2686 c902760f Marcelo Tosatti
2687 c902760f Marcelo Tosatti
    if (memory < hpagesize) {
2688 c902760f Marcelo Tosatti
        return NULL;
2689 c902760f Marcelo Tosatti
    }
2690 c902760f Marcelo Tosatti
2691 c902760f Marcelo Tosatti
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
2692 c902760f Marcelo Tosatti
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2693 c902760f Marcelo Tosatti
        return NULL;
2694 c902760f Marcelo Tosatti
    }
2695 c902760f Marcelo Tosatti
2696 c902760f Marcelo Tosatti
    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2697 9742bf26 Yoshiaki Tamura
        return NULL;
2698 c902760f Marcelo Tosatti
    }
2699 c902760f Marcelo Tosatti
2700 c902760f Marcelo Tosatti
    fd = mkstemp(filename);
2701 c902760f Marcelo Tosatti
    if (fd < 0) {
2702 9742bf26 Yoshiaki Tamura
        perror("unable to create backing store for hugepages");
2703 9742bf26 Yoshiaki Tamura
        free(filename);
2704 9742bf26 Yoshiaki Tamura
        return NULL;
2705 c902760f Marcelo Tosatti
    }
2706 c902760f Marcelo Tosatti
    unlink(filename);
2707 c902760f Marcelo Tosatti
    free(filename);
2708 c902760f Marcelo Tosatti
2709 c902760f Marcelo Tosatti
    memory = (memory+hpagesize-1) & ~(hpagesize-1);
2710 c902760f Marcelo Tosatti
2711 c902760f Marcelo Tosatti
    /*
2712 c902760f Marcelo Tosatti
     * ftruncate is not supported by hugetlbfs on older
2713 c902760f Marcelo Tosatti
     * hosts, so don't bother bailing out on errors.
2714 c902760f Marcelo Tosatti
     * If anything goes wrong with it under other filesystems,
2715 c902760f Marcelo Tosatti
     * mmap will fail.
2716 c902760f Marcelo Tosatti
     */
2717 c902760f Marcelo Tosatti
    if (ftruncate(fd, memory))
2718 9742bf26 Yoshiaki Tamura
        perror("ftruncate");
2719 c902760f Marcelo Tosatti
2720 c902760f Marcelo Tosatti
#ifdef MAP_POPULATE
2721 c902760f Marcelo Tosatti
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2722 c902760f Marcelo Tosatti
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2723 c902760f Marcelo Tosatti
     * to sidestep this quirk.
2724 c902760f Marcelo Tosatti
     */
2725 c902760f Marcelo Tosatti
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2726 c902760f Marcelo Tosatti
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2727 c902760f Marcelo Tosatti
#else
2728 c902760f Marcelo Tosatti
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2729 c902760f Marcelo Tosatti
#endif
2730 c902760f Marcelo Tosatti
    if (area == MAP_FAILED) {
2731 9742bf26 Yoshiaki Tamura
        perror("file_ram_alloc: can't mmap RAM pages");
2732 9742bf26 Yoshiaki Tamura
        close(fd);
2733 9742bf26 Yoshiaki Tamura
        return NULL;
2734 c902760f Marcelo Tosatti
    }
2735 04b16653 Alex Williamson
    block->fd = fd;
2736 c902760f Marcelo Tosatti
    return area;
2737 c902760f Marcelo Tosatti
}
2738 c902760f Marcelo Tosatti
#endif
2739 c902760f Marcelo Tosatti
2740 d17b5288 Alex Williamson
static ram_addr_t find_ram_offset(ram_addr_t size)
2741 d17b5288 Alex Williamson
{
2742 04b16653 Alex Williamson
    RAMBlock *block, *next_block;
2743 3e837b2c Alex Williamson
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2744 04b16653 Alex Williamson
2745 04b16653 Alex Williamson
    if (QLIST_EMPTY(&ram_list.blocks))
2746 04b16653 Alex Williamson
        return 0;
2747 04b16653 Alex Williamson
2748 04b16653 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2749 f15fbc4b Anthony PERARD
        ram_addr_t end, next = RAM_ADDR_MAX;
2750 04b16653 Alex Williamson
2751 04b16653 Alex Williamson
        end = block->offset + block->length;
2752 04b16653 Alex Williamson
2753 04b16653 Alex Williamson
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2754 04b16653 Alex Williamson
            if (next_block->offset >= end) {
2755 04b16653 Alex Williamson
                next = MIN(next, next_block->offset);
2756 04b16653 Alex Williamson
            }
2757 04b16653 Alex Williamson
        }
2758 04b16653 Alex Williamson
        if (next - end >= size && next - end < mingap) {
2759 3e837b2c Alex Williamson
            offset = end;
2760 04b16653 Alex Williamson
            mingap = next - end;
2761 04b16653 Alex Williamson
        }
2762 04b16653 Alex Williamson
    }
2763 3e837b2c Alex Williamson
2764 3e837b2c Alex Williamson
    if (offset == RAM_ADDR_MAX) {
2765 3e837b2c Alex Williamson
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2766 3e837b2c Alex Williamson
                (uint64_t)size);
2767 3e837b2c Alex Williamson
        abort();
2768 3e837b2c Alex Williamson
    }
2769 3e837b2c Alex Williamson
2770 04b16653 Alex Williamson
    return offset;
2771 04b16653 Alex Williamson
}
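
/* Worked example (illustrative): with blocks at [0, 4M) and [8M, 12M), a
   request for size 2M considers the gap [4M, 8M): next - end = 4M fits
   and becomes mingap, so find_ram_offset() returns 4M -- the smallest
   existing gap that can hold the request. */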
2772 04b16653 Alex Williamson
2773 04b16653 Alex Williamson
static ram_addr_t last_ram_offset(void)
2774 04b16653 Alex Williamson
{
2775 d17b5288 Alex Williamson
    RAMBlock *block;
2776 d17b5288 Alex Williamson
    ram_addr_t last = 0;
2777 d17b5288 Alex Williamson
2778 d17b5288 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next)
2779 d17b5288 Alex Williamson
        last = MAX(last, block->offset + block->length);
2780 d17b5288 Alex Williamson
2781 d17b5288 Alex Williamson
    return last;
2782 d17b5288 Alex Williamson
}
2783 d17b5288 Alex Williamson
2784 c5705a77 Avi Kivity
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2785 84b89d78 Cam Macdonell
{
2786 84b89d78 Cam Macdonell
    RAMBlock *new_block, *block;
2787 84b89d78 Cam Macdonell
2788 c5705a77 Avi Kivity
    new_block = NULL;
2789 c5705a77 Avi Kivity
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2790 c5705a77 Avi Kivity
        if (block->offset == addr) {
2791 c5705a77 Avi Kivity
            new_block = block;
2792 c5705a77 Avi Kivity
            break;
2793 c5705a77 Avi Kivity
        }
2794 c5705a77 Avi Kivity
    }
2795 c5705a77 Avi Kivity
    assert(new_block);
2796 c5705a77 Avi Kivity
    assert(!new_block->idstr[0]);
2797 84b89d78 Cam Macdonell
2798 84b89d78 Cam Macdonell
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2799 84b89d78 Cam Macdonell
        char *id = dev->parent_bus->info->get_dev_path(dev);
2800 84b89d78 Cam Macdonell
        if (id) {
2801 84b89d78 Cam Macdonell
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2802 7267c094 Anthony Liguori
            g_free(id);
2803 84b89d78 Cam Macdonell
        }
2804 84b89d78 Cam Macdonell
    }
2805 84b89d78 Cam Macdonell
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2806 84b89d78 Cam Macdonell
2807 84b89d78 Cam Macdonell
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2808 c5705a77 Avi Kivity
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2809 84b89d78 Cam Macdonell
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2810 84b89d78 Cam Macdonell
                    new_block->idstr);
2811 84b89d78 Cam Macdonell
            abort();
2812 84b89d78 Cam Macdonell
        }
2813 84b89d78 Cam Macdonell
    }
2814 c5705a77 Avi Kivity
}
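
/* Example (illustrative): for a block named "e1000.rom" on a device with
   a qdev bus path, the resulting idstr has the form
   "<dev-path>/e1000.rom"; without a path it is just the plain name. */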
2815 c5705a77 Avi Kivity
2816 c5705a77 Avi Kivity
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2817 c5705a77 Avi Kivity
                                   MemoryRegion *mr)
2818 c5705a77 Avi Kivity
{
2819 c5705a77 Avi Kivity
    RAMBlock *new_block;
2820 c5705a77 Avi Kivity
2821 c5705a77 Avi Kivity
    size = TARGET_PAGE_ALIGN(size);
2822 c5705a77 Avi Kivity
    new_block = g_malloc0(sizeof(*new_block));
2823 84b89d78 Cam Macdonell
2824 7c637366 Avi Kivity
    new_block->mr = mr;
2825 432d268c Jun Nakajima
    new_block->offset = find_ram_offset(size);
2826 6977dfe6 Yoshiaki Tamura
    if (host) {
2827 6977dfe6 Yoshiaki Tamura
        new_block->host = host;
2828 cd19cfa2 Huang Ying
        new_block->flags |= RAM_PREALLOC_MASK;
2829 6977dfe6 Yoshiaki Tamura
    } else {
2830 6977dfe6 Yoshiaki Tamura
        if (mem_path) {
2831 c902760f Marcelo Tosatti
#if defined (__linux__) && !defined(TARGET_S390X)
2832 6977dfe6 Yoshiaki Tamura
            new_block->host = file_ram_alloc(new_block, size, mem_path);
2833 6977dfe6 Yoshiaki Tamura
            if (!new_block->host) {
2834 6977dfe6 Yoshiaki Tamura
                new_block->host = qemu_vmalloc(size);
2835 e78815a5 Andreas Fรคrber
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2836 6977dfe6 Yoshiaki Tamura
            }
2837 c902760f Marcelo Tosatti
#else
2838 6977dfe6 Yoshiaki Tamura
            fprintf(stderr, "-mem-path option unsupported\n");
2839 6977dfe6 Yoshiaki Tamura
            exit(1);
2840 c902760f Marcelo Tosatti
#endif
2841 6977dfe6 Yoshiaki Tamura
        } else {
2842 6b02494d Alexander Graf
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2843 ff83678a Christian Borntraeger
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
2844 ff83678a Christian Borntraeger
               a system-defined value, which is at least 256GB. Larger systems
2845 ff83678a Christian Borntraeger
               have larger values. We put the guest between the end of data
2846 ff83678a Christian Borntraeger
               segment (system break) and this value. We use 32GB as a base to
2847 ff83678a Christian Borntraeger
               have enough room for the system break to grow. */
2848 ff83678a Christian Borntraeger
            new_block->host = mmap((void*)0x800000000, size,
2849 6977dfe6 Yoshiaki Tamura
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
2850 ff83678a Christian Borntraeger
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2851 fb8b2735 Alexander Graf
            if (new_block->host == MAP_FAILED) {
2852 fb8b2735 Alexander Graf
                fprintf(stderr, "Allocating RAM failed\n");
2853 fb8b2735 Alexander Graf
                abort();
2854 fb8b2735 Alexander Graf
            }
2855 6b02494d Alexander Graf
#else
2856 868bb33f Jan Kiszka
            if (xen_enabled()) {
2857 fce537d4 Avi Kivity
                xen_ram_alloc(new_block->offset, size, mr);
2858 432d268c Jun Nakajima
            } else {
2859 432d268c Jun Nakajima
                new_block->host = qemu_vmalloc(size);
2860 432d268c Jun Nakajima
            }
2861 6b02494d Alexander Graf
#endif
2862 e78815a5 Andreas Fรคrber
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2863 6977dfe6 Yoshiaki Tamura
        }
2864 c902760f Marcelo Tosatti
    }
2865 94a6b54f pbrook
    new_block->length = size;
2866 94a6b54f pbrook
2867 f471a17e Alex Williamson
    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2868 94a6b54f pbrook
2869 7267c094 Anthony Liguori
    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2870 04b16653 Alex Williamson
                                       last_ram_offset() >> TARGET_PAGE_BITS);
2871 d17b5288 Alex Williamson
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2872 94a6b54f pbrook
           0xff, size >> TARGET_PAGE_BITS);
2873 94a6b54f pbrook
2874 6f0437e8 Jan Kiszka
    if (kvm_enabled())
2875 6f0437e8 Jan Kiszka
        kvm_setup_guest_memory(new_block->host, size);
2876 6f0437e8 Jan Kiszka
2877 94a6b54f pbrook
    return new_block->offset;
2878 94a6b54f pbrook
}
2879 e9a1ab19 bellard
2880 c5705a77 Avi Kivity
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2881 6977dfe6 Yoshiaki Tamura
{
2882 c5705a77 Avi Kivity
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
2883 6977dfe6 Yoshiaki Tamura
}
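
/* Illustrative sketch (hypothetical): a device model backing a
   MemoryRegion with guest RAM; the returned ram_addr_t is the offset
   understood by the qemu_get_ram_ptr() family of helpers below. */
static ram_addr_t example_alloc_vram(MemoryRegion *mr, ram_addr_t bytes)
{
    return qemu_ram_alloc(bytes, mr);
}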
2884 6977dfe6 Yoshiaki Tamura
2885 1f2e98b6 Alex Williamson
void qemu_ram_free_from_ptr(ram_addr_t addr)
2886 1f2e98b6 Alex Williamson
{
2887 1f2e98b6 Alex Williamson
    RAMBlock *block;
2888 1f2e98b6 Alex Williamson
2889 1f2e98b6 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2890 1f2e98b6 Alex Williamson
        if (addr == block->offset) {
2891 1f2e98b6 Alex Williamson
            QLIST_REMOVE(block, next);
2892 7267c094 Anthony Liguori
            g_free(block);
2893 1f2e98b6 Alex Williamson
            return;
2894 1f2e98b6 Alex Williamson
        }
2895 1f2e98b6 Alex Williamson
    }
2896 1f2e98b6 Alex Williamson
}
2897 1f2e98b6 Alex Williamson
2898 c227f099 Anthony Liguori
void qemu_ram_free(ram_addr_t addr)
2899 e9a1ab19 bellard
{
2900 04b16653 Alex Williamson
    RAMBlock *block;
2901 04b16653 Alex Williamson
2902 04b16653 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2903 04b16653 Alex Williamson
        if (addr == block->offset) {
2904 04b16653 Alex Williamson
            QLIST_REMOVE(block, next);
2905 cd19cfa2 Huang Ying
            if (block->flags & RAM_PREALLOC_MASK) {
2906 cd19cfa2 Huang Ying
                ;
2907 cd19cfa2 Huang Ying
            } else if (mem_path) {
2908 04b16653 Alex Williamson
#if defined (__linux__) && !defined(TARGET_S390X)
2909 04b16653 Alex Williamson
                if (block->fd) {
2910 04b16653 Alex Williamson
                    munmap(block->host, block->length);
2911 04b16653 Alex Williamson
                    close(block->fd);
2912 04b16653 Alex Williamson
                } else {
2913 04b16653 Alex Williamson
                    qemu_vfree(block->host);
2914 04b16653 Alex Williamson
                }
2915 fd28aa13 Jan Kiszka
#else
2916 fd28aa13 Jan Kiszka
                abort();
2917 04b16653 Alex Williamson
#endif
2918 04b16653 Alex Williamson
            } else {
2919 04b16653 Alex Williamson
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2920 04b16653 Alex Williamson
                munmap(block->host, block->length);
2921 04b16653 Alex Williamson
#else
2922 868bb33f Jan Kiszka
                if (xen_enabled()) {
2923 e41d7c69 Jan Kiszka
                    xen_invalidate_map_cache_entry(block->host);
2924 432d268c Jun Nakajima
                } else {
2925 432d268c Jun Nakajima
                    qemu_vfree(block->host);
2926 432d268c Jun Nakajima
                }
2927 04b16653 Alex Williamson
#endif
2928 04b16653 Alex Williamson
            }
2929 7267c094 Anthony Liguori
            g_free(block);
2930 04b16653 Alex Williamson
            return;
2931 04b16653 Alex Williamson
        }
2932 04b16653 Alex Williamson
    }
2933 04b16653 Alex Williamson
2934 e9a1ab19 bellard
}
2935 e9a1ab19 bellard
2936 cd19cfa2 Huang Ying
#ifndef _WIN32
2937 cd19cfa2 Huang Ying
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2938 cd19cfa2 Huang Ying
{
2939 cd19cfa2 Huang Ying
    RAMBlock *block;
2940 cd19cfa2 Huang Ying
    ram_addr_t offset;
2941 cd19cfa2 Huang Ying
    int flags;
2942 cd19cfa2 Huang Ying
    void *area, *vaddr;
2943 cd19cfa2 Huang Ying
2944 cd19cfa2 Huang Ying
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2945 cd19cfa2 Huang Ying
        offset = addr - block->offset;
2946 cd19cfa2 Huang Ying
        if (offset < block->length) {
2947 cd19cfa2 Huang Ying
            vaddr = block->host + offset;
2948 cd19cfa2 Huang Ying
            if (block->flags & RAM_PREALLOC_MASK) {
2949 cd19cfa2 Huang Ying
                ;
2950 cd19cfa2 Huang Ying
            } else {
2951 cd19cfa2 Huang Ying
                flags = MAP_FIXED;
2952 cd19cfa2 Huang Ying
                munmap(vaddr, length);
2953 cd19cfa2 Huang Ying
                if (mem_path) {
2954 cd19cfa2 Huang Ying
#if defined(__linux__) && !defined(TARGET_S390X)
2955 cd19cfa2 Huang Ying
                    if (block->fd) {
2956 cd19cfa2 Huang Ying
#ifdef MAP_POPULATE
2957 cd19cfa2 Huang Ying
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2958 cd19cfa2 Huang Ying
                            MAP_PRIVATE;
2959 cd19cfa2 Huang Ying
#else
2960 cd19cfa2 Huang Ying
                        flags |= MAP_PRIVATE;
2961 cd19cfa2 Huang Ying
#endif
2962 cd19cfa2 Huang Ying
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2963 cd19cfa2 Huang Ying
                                    flags, block->fd, offset);
2964 cd19cfa2 Huang Ying
                    } else {
2965 cd19cfa2 Huang Ying
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2966 cd19cfa2 Huang Ying
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2967 cd19cfa2 Huang Ying
                                    flags, -1, 0);
2968 cd19cfa2 Huang Ying
                    }
2969 fd28aa13 Jan Kiszka
#else
2970 fd28aa13 Jan Kiszka
                    abort();
2971 cd19cfa2 Huang Ying
#endif
2972 cd19cfa2 Huang Ying
                } else {
2973 cd19cfa2 Huang Ying
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2974 cd19cfa2 Huang Ying
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
2975 cd19cfa2 Huang Ying
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2976 cd19cfa2 Huang Ying
                                flags, -1, 0);
2977 cd19cfa2 Huang Ying
#else
2978 cd19cfa2 Huang Ying
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2979 cd19cfa2 Huang Ying
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2980 cd19cfa2 Huang Ying
                                flags, -1, 0);
2981 cd19cfa2 Huang Ying
#endif
2982 cd19cfa2 Huang Ying
                }
2983 cd19cfa2 Huang Ying
                if (area != vaddr) {
2984 f15fbc4b Anthony PERARD
                    fprintf(stderr, "Could not remap addr: "
2985 f15fbc4b Anthony PERARD
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2986 cd19cfa2 Huang Ying
                            length, addr);
2987 cd19cfa2 Huang Ying
                    exit(1);
2988 cd19cfa2 Huang Ying
                }
2989 cd19cfa2 Huang Ying
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2990 cd19cfa2 Huang Ying
            }
2991 cd19cfa2 Huang Ying
            return;
2992 cd19cfa2 Huang Ying
        }
2993 cd19cfa2 Huang Ying
    }
2994 cd19cfa2 Huang Ying
}
2995 cd19cfa2 Huang Ying
#endif /* !_WIN32 */
2996 cd19cfa2 Huang Ying
2997 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2998 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2999 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
3000 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
3001 5579c7f3 pbrook

3002 5579c7f3 pbrook
   It should not be used for general purpose DMA.
3003 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3004 5579c7f3 pbrook
 */
3005 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
3006 dc828ca1 pbrook
{
3007 94a6b54f pbrook
    RAMBlock *block;
3008 94a6b54f pbrook
3009 f471a17e Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3010 f471a17e Alex Williamson
        if (addr - block->offset < block->length) {
3011 7d82af38 Vincent Palatin
            /* Move this entry to the start of the list.  */
3012 7d82af38 Vincent Palatin
            if (block != QLIST_FIRST(&ram_list.blocks)) {
3013 7d82af38 Vincent Palatin
                QLIST_REMOVE(block, next);
3014 7d82af38 Vincent Palatin
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3015 7d82af38 Vincent Palatin
            }
3016 868bb33f Jan Kiszka
            if (xen_enabled()) {
3017 432d268c Jun Nakajima
                /* We need to check whether the requested address is in RAM
3018 432d268c Jun Nakajima
                 * because we don't want to map all of guest memory in QEMU;
3019 712c2b41 Stefano Stabellini
                 * in that case, just map up to the end of the page.
3020 432d268c Jun Nakajima
                 */
3021 432d268c Jun Nakajima
                if (block->offset == 0) {
3022 e41d7c69 Jan Kiszka
                    return xen_map_cache(addr, 0, 0);
3023 432d268c Jun Nakajima
                } else if (block->host == NULL) {
3024 e41d7c69 Jan Kiszka
                    block->host =
3025 e41d7c69 Jan Kiszka
                        xen_map_cache(block->offset, block->length, 1);
3026 432d268c Jun Nakajima
                }
3027 432d268c Jun Nakajima
            }
3028 f471a17e Alex Williamson
            return block->host + (addr - block->offset);
3029 f471a17e Alex Williamson
        }
3030 94a6b54f pbrook
    }
3031 f471a17e Alex Williamson
3032 f471a17e Alex Williamson
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3033 f471a17e Alex Williamson
    abort();
3034 f471a17e Alex Williamson
3035 f471a17e Alex Williamson
    return NULL;
3036 dc828ca1 pbrook
}
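
/* Illustrative sketch (hypothetical): a display device that owns its
   video RAM block resolves the host pointer once and scans it directly,
   within the rules spelled out in the comment above (no general-purpose
   DMA through this pointer). */
static void *example_map_vram(ram_addr_t vram_offset)
{
    return qemu_get_ram_ptr(vram_offset);
}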
3037 dc828ca1 pbrook
3038 b2e0a138 Michael S. Tsirkin
/* Return a host pointer to ram allocated with qemu_ram_alloc.
3039 b2e0a138 Michael S. Tsirkin
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3040 b2e0a138 Michael S. Tsirkin
 */
3041 b2e0a138 Michael S. Tsirkin
void *qemu_safe_ram_ptr(ram_addr_t addr)
3042 b2e0a138 Michael S. Tsirkin
{
3043 b2e0a138 Michael S. Tsirkin
    RAMBlock *block;
3044 b2e0a138 Michael S. Tsirkin
3045 b2e0a138 Michael S. Tsirkin
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3046 b2e0a138 Michael S. Tsirkin
        if (addr - block->offset < block->length) {
3047 868bb33f Jan Kiszka
            if (xen_enabled()) {
3048 432d268c Jun Nakajima
                /* We need to check whether the requested address is in RAM
3049 432d268c Jun Nakajima
                 * because we don't want to map all of guest memory in QEMU;
3050 712c2b41 Stefano Stabellini
                 * in that case, just map up to the end of the page.
3051 432d268c Jun Nakajima
                 */
3052 432d268c Jun Nakajima
                if (block->offset == 0) {
3053 e41d7c69 Jan Kiszka
                    return xen_map_cache(addr, 0, 0);
3054 432d268c Jun Nakajima
                } else if (block->host == NULL) {
3055 e41d7c69 Jan Kiszka
                    block->host =
3056 e41d7c69 Jan Kiszka
                        xen_map_cache(block->offset, block->length, 1);
3057 432d268c Jun Nakajima
                }
3058 432d268c Jun Nakajima
            }
3059 b2e0a138 Michael S. Tsirkin
            return block->host + (addr - block->offset);
3060 b2e0a138 Michael S. Tsirkin
        }
3061 b2e0a138 Michael S. Tsirkin
    }
3062 b2e0a138 Michael S. Tsirkin
3063 b2e0a138 Michael S. Tsirkin
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3064 b2e0a138 Michael S. Tsirkin
    abort();
3065 b2e0a138 Michael S. Tsirkin
3066 b2e0a138 Michael S. Tsirkin
    return NULL;
3067 b2e0a138 Michael S. Tsirkin
}
3068 b2e0a138 Michael S. Tsirkin
3069 38bee5dc Stefano Stabellini
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3070 38bee5dc Stefano Stabellini
 * but takes a size argument */
3071 8ab934f9 Stefano Stabellini
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3072 38bee5dc Stefano Stabellini
{
3073 8ab934f9 Stefano Stabellini
    if (*size == 0) {
3074 8ab934f9 Stefano Stabellini
        return NULL;
3075 8ab934f9 Stefano Stabellini
    }
3076 868bb33f Jan Kiszka
    if (xen_enabled()) {
3077 e41d7c69 Jan Kiszka
        return xen_map_cache(addr, *size, 1);
3078 868bb33f Jan Kiszka
    } else {
3079 38bee5dc Stefano Stabellini
        RAMBlock *block;
3080 38bee5dc Stefano Stabellini
3081 38bee5dc Stefano Stabellini
        QLIST_FOREACH(block, &ram_list.blocks, next) {
3082 38bee5dc Stefano Stabellini
            if (addr - block->offset < block->length) {
3083 38bee5dc Stefano Stabellini
                if (addr - block->offset + *size > block->length)
3084 38bee5dc Stefano Stabellini
                    *size = block->length - addr + block->offset;
3085 38bee5dc Stefano Stabellini
                return block->host + (addr - block->offset);
3086 38bee5dc Stefano Stabellini
            }
3087 38bee5dc Stefano Stabellini
        }
3088 38bee5dc Stefano Stabellini
3089 38bee5dc Stefano Stabellini
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3090 38bee5dc Stefano Stabellini
        abort();
3091 38bee5dc Stefano Stabellini
    }
3092 38bee5dc Stefano Stabellini
}
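
/* Example (illustrative): *size is an in/out parameter.  A caller asking
   for 0x3000 bytes at an offset only 0x1000 bytes short of the end of a
   block gets *size clipped to 0x1000, and the returned pointer is valid
   for exactly that much. */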
3093 38bee5dc Stefano Stabellini
3094 050a0ddf Anthony PERARD
void qemu_put_ram_ptr(void *addr)
3095 050a0ddf Anthony PERARD
{
3096 050a0ddf Anthony PERARD
    trace_qemu_put_ram_ptr(addr);
3097 050a0ddf Anthony PERARD
}
3098 050a0ddf Anthony PERARD
3099 e890261f Marcelo Tosatti
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3100 5579c7f3 pbrook
{
3101 94a6b54f pbrook
    RAMBlock *block;
3102 94a6b54f pbrook
    uint8_t *host = ptr;
3103 94a6b54f pbrook
3104 868bb33f Jan Kiszka
    if (xen_enabled()) {
3105 e41d7c69 Jan Kiszka
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
3106 712c2b41 Stefano Stabellini
        return 0;
3107 712c2b41 Stefano Stabellini
    }
3108 712c2b41 Stefano Stabellini
3109 f471a17e Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3110 432d268c Jun Nakajima
        /* This case append when the block is not mapped. */
3111 432d268c Jun Nakajima
        if (block->host == NULL) {
3112 432d268c Jun Nakajima
            continue;
3113 432d268c Jun Nakajima
        }
3114 f471a17e Alex Williamson
        if (host - block->host < block->length) {
3115 e890261f Marcelo Tosatti
            *ram_addr = block->offset + (host - block->host);
3116 e890261f Marcelo Tosatti
            return 0;
3117 f471a17e Alex Williamson
        }
3118 94a6b54f pbrook
    }
3119 432d268c Jun Nakajima
3120 e890261f Marcelo Tosatti
    return -1;
3121 e890261f Marcelo Tosatti
}
3122 f471a17e Alex Williamson
3123 e890261f Marcelo Tosatti
/* Some of the softmmu routines need to translate from a host pointer
3124 e890261f Marcelo Tosatti
   (typically a TLB entry) back to a ram offset.  */
3125 e890261f Marcelo Tosatti
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3126 e890261f Marcelo Tosatti
{
3127 e890261f Marcelo Tosatti
    ram_addr_t ram_addr;
3128 f471a17e Alex Williamson
3129 e890261f Marcelo Tosatti
    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3130 e890261f Marcelo Tosatti
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
3131 e890261f Marcelo Tosatti
        abort();
3132 e890261f Marcelo Tosatti
    }
3133 e890261f Marcelo Tosatti
    return ram_addr;
3134 5579c7f3 pbrook
}
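
/* Example (illustrative): the two directions round-trip, i.e.
   qemu_ram_addr_from_host_nofail(qemu_get_ram_ptr(addr)) == addr for any
   valid RAM offset addr (ignoring the Xen map-cache special cases). */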
3135 5579c7f3 pbrook
3136 0e0df1e2 Avi Kivity
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3137 0e0df1e2 Avi Kivity
                                    unsigned size)
3138 e18231a3 blueswir1
{
3139 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
3140 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3141 e18231a3 blueswir1
#endif
3142 5b450407 Richard Henderson
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3143 0e0df1e2 Avi Kivity
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
3144 e18231a3 blueswir1
#endif
3145 e18231a3 blueswir1
    return 0;
3146 e18231a3 blueswir1
}
3147 e18231a3 blueswir1
3148 0e0df1e2 Avi Kivity
static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3149 0e0df1e2 Avi Kivity
                                 uint64_t val, unsigned size)
3150 e18231a3 blueswir1
{
3151 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
3152 0e0df1e2 Avi Kivity
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
3153 e18231a3 blueswir1
#endif
3154 5b450407 Richard Henderson
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3155 0e0df1e2 Avi Kivity
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
3156 b4f0a316 blueswir1
#endif
3157 33417e70 bellard
}
3158 33417e70 bellard
3159 0e0df1e2 Avi Kivity
static const MemoryRegionOps unassigned_mem_ops = {
3160 0e0df1e2 Avi Kivity
    .read = unassigned_mem_read,
3161 0e0df1e2 Avi Kivity
    .write = unassigned_mem_write,
3162 0e0df1e2 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
3163 0e0df1e2 Avi Kivity
};
3164 e18231a3 blueswir1
3165 0e0df1e2 Avi Kivity
static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3166 0e0df1e2 Avi Kivity
                               unsigned size)
3167 e18231a3 blueswir1
{
3168 0e0df1e2 Avi Kivity
    abort();
3169 e18231a3 blueswir1
}
3170 e18231a3 blueswir1
3171 0e0df1e2 Avi Kivity
static void error_mem_write(void *opaque, target_phys_addr_t addr,
3172 0e0df1e2 Avi Kivity
                            uint64_t value, unsigned size)
3173 e18231a3 blueswir1
{
3174 0e0df1e2 Avi Kivity
    abort();
3175 33417e70 bellard
}
3176 33417e70 bellard
3177 0e0df1e2 Avi Kivity
static const MemoryRegionOps error_mem_ops = {
3178 0e0df1e2 Avi Kivity
    .read = error_mem_read,
3179 0e0df1e2 Avi Kivity
    .write = error_mem_write,
3180 0e0df1e2 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
3181 33417e70 bellard
};
3182 33417e70 bellard
3183 0e0df1e2 Avi Kivity
static const MemoryRegionOps rom_mem_ops = {
3184 0e0df1e2 Avi Kivity
    .read = error_mem_read,
3185 0e0df1e2 Avi Kivity
    .write = unassigned_mem_write,
3186 0e0df1e2 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
3187 33417e70 bellard
};
3188 33417e70 bellard
3189 0e0df1e2 Avi Kivity
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3190 0e0df1e2 Avi Kivity
                               uint64_t val, unsigned size)
3191 9fa3e853 bellard
{
3192 3a7d929e bellard
    int dirty_flags;
3193 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3194 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3195 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3196 0e0df1e2 Avi Kivity
        tb_invalidate_phys_page_fast(ram_addr, size);
3197 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3198 9fa3e853 bellard
#endif
3199 3a7d929e bellard
    }
3200 0e0df1e2 Avi Kivity
    switch (size) {
3201 0e0df1e2 Avi Kivity
    case 1:
3202 0e0df1e2 Avi Kivity
        stb_p(qemu_get_ram_ptr(ram_addr), val);
3203 0e0df1e2 Avi Kivity
        break;
3204 0e0df1e2 Avi Kivity
    case 2:
3205 0e0df1e2 Avi Kivity
        stw_p(qemu_get_ram_ptr(ram_addr), val);
3206 0e0df1e2 Avi Kivity
        break;
3207 0e0df1e2 Avi Kivity
    case 4:
3208 0e0df1e2 Avi Kivity
        stl_p(qemu_get_ram_ptr(ram_addr), val);
3209 0e0df1e2 Avi Kivity
        break;
3210 0e0df1e2 Avi Kivity
    default:
3211 0e0df1e2 Avi Kivity
        abort();
3212 3a7d929e bellard
    }
3213 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3214 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3215 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3216 f23db169 bellard
       flushed */
3217 f23db169 bellard
    if (dirty_flags == 0xff)
3218 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3219 9fa3e853 bellard
}
3220 9fa3e853 bellard
3221 0e0df1e2 Avi Kivity
static const MemoryRegionOps notdirty_mem_ops = {
3222 0e0df1e2 Avi Kivity
    .read = error_mem_read,
3223 0e0df1e2 Avi Kivity
    .write = notdirty_mem_write,
3224 0e0df1e2 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
3225 1ccde1cb bellard
};
3226 1ccde1cb bellard
3227 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
3228 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
3229 0f459d16 pbrook
{
3230 0f459d16 pbrook
    CPUState *env = cpu_single_env;
3231 06d55cc1 aliguori
    target_ulong pc, cs_base;
3232 06d55cc1 aliguori
    TranslationBlock *tb;
3233 0f459d16 pbrook
    target_ulong vaddr;
3234 a1d1bb31 aliguori
    CPUWatchpoint *wp;
3235 06d55cc1 aliguori
    int cpu_flags;
3236 0f459d16 pbrook
3237 06d55cc1 aliguori
    if (env->watchpoint_hit) {
3238 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
3239 06d55cc1 aliguori
         * the debug interrupt so that it will trigger after the
3240 06d55cc1 aliguori
         * current instruction. */
3241 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3242 06d55cc1 aliguori
        return;
3243 06d55cc1 aliguori
    }
3244 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3245 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3246 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
3247 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3248 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
3249 6e140f28 aliguori
            if (!env->watchpoint_hit) {
3250 6e140f28 aliguori
                env->watchpoint_hit = wp;
3251 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
3252 6e140f28 aliguori
                if (!tb) {
3253 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
3254 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
3255 6e140f28 aliguori
                }
3256 618ba8e6 Stefan Weil
                cpu_restore_state(tb, env, env->mem_io_pc);
3257 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
3258 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3259 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
3260 6e140f28 aliguori
                } else {
3261 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3262 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3263 6e140f28 aliguori
                }
3264 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
3265 06d55cc1 aliguori
            }
3266 6e140f28 aliguori
        } else {
3267 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
3268 0f459d16 pbrook
        }
3269 0f459d16 pbrook
    }
3270 0f459d16 pbrook
}
3271 0f459d16 pbrook
3272 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
3273 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
3274 6658ffb8 pbrook
   phys routines.  */
3275 1ec9b909 Avi Kivity
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3276 1ec9b909 Avi Kivity
                               unsigned size)
3277 6658ffb8 pbrook
{
3278 1ec9b909 Avi Kivity
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3279 1ec9b909 Avi Kivity
    switch (size) {
3280 1ec9b909 Avi Kivity
    case 1: return ldub_phys(addr);
3281 1ec9b909 Avi Kivity
    case 2: return lduw_phys(addr);
3282 1ec9b909 Avi Kivity
    case 4: return ldl_phys(addr);
3283 1ec9b909 Avi Kivity
    default: abort();
3284 1ec9b909 Avi Kivity
    }
3285 6658ffb8 pbrook
}
3286 6658ffb8 pbrook
3287 1ec9b909 Avi Kivity
static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3288 1ec9b909 Avi Kivity
                            uint64_t val, unsigned size)
3289 6658ffb8 pbrook
{
3290 1ec9b909 Avi Kivity
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3291 1ec9b909 Avi Kivity
    switch (size) {
3292 1ec9b909 Avi Kivity
    case 1: stb_phys(addr, val); break;
3293 1ec9b909 Avi Kivity
    case 2: stw_phys(addr, val); break;
3294 1ec9b909 Avi Kivity
    case 4: stl_phys(addr, val); break;
3295 1ec9b909 Avi Kivity
    default: abort();
3296 1ec9b909 Avi Kivity
    }
3297 6658ffb8 pbrook
}
3298 6658ffb8 pbrook
3299 1ec9b909 Avi Kivity
static const MemoryRegionOps watch_mem_ops = {
3300 1ec9b909 Avi Kivity
    .read = watch_mem_read,
3301 1ec9b909 Avi Kivity
    .write = watch_mem_write,
3302 1ec9b909 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
3303 6658ffb8 pbrook
};
3304 6658ffb8 pbrook
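/* Illustrative sketch, not part of the original file: arming one of these
 * watchpoints from a debugger front end.  Assumes the cpu_watchpoint_insert()
 * API declared in cpu-all.h; the wrapper name is hypothetical and error
 * handling is elided. */
static void example_watch_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* Trap 4-byte writes: the TLB routes such accesses through
       watch_mem_write() above, which calls check_watchpoint(). */
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}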
3305 70c68e44 Avi Kivity
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3306 70c68e44 Avi Kivity
                             unsigned len)
3307 db7b5426 blueswir1
{
3308 70c68e44 Avi Kivity
    subpage_t *mmio = opaque;
3309 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3310 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3311 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3312 db7b5426 blueswir1
           mmio, len, addr, idx);
3313 db7b5426 blueswir1
#endif
3314 db7b5426 blueswir1
3315 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3316 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3317 70c68e44 Avi Kivity
    return io_mem_read(idx, addr, len);
3318 db7b5426 blueswir1
}
3319 db7b5426 blueswir1
3320 70c68e44 Avi Kivity
static void subpage_write(void *opaque, target_phys_addr_t addr,
3321 70c68e44 Avi Kivity
                          uint64_t value, unsigned len)
3322 db7b5426 blueswir1
{
3323 70c68e44 Avi Kivity
    subpage_t *mmio = opaque;
3324 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3325 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3326 70c68e44 Avi Kivity
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3327 70c68e44 Avi Kivity
           " idx %d value %"PRIx64"\n",
3328 f6405247 Richard Henderson
           __func__, mmio, len, addr, idx, value);
3329 db7b5426 blueswir1
#endif
3330 f6405247 Richard Henderson
3331 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3332 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3333 70c68e44 Avi Kivity
    io_mem_write(idx, addr, value, len);
3334 db7b5426 blueswir1
}
3335 db7b5426 blueswir1
3336 70c68e44 Avi Kivity
static const MemoryRegionOps subpage_ops = {
3337 70c68e44 Avi Kivity
    .read = subpage_read,
3338 70c68e44 Avi Kivity
    .write = subpage_write,
3339 70c68e44 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
3340 db7b5426 blueswir1
};
3341 db7b5426 blueswir1
3342 de712f94 Avi Kivity
static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3343 de712f94 Avi Kivity
                                 unsigned size)
3344 56384e8b Andreas Färber
{
3345 56384e8b Andreas Färber
    ram_addr_t raddr = addr;
3346 56384e8b Andreas Färber
    void *ptr = qemu_get_ram_ptr(raddr);
3347 de712f94 Avi Kivity
    switch (size) {
3348 de712f94 Avi Kivity
    case 1: return ldub_p(ptr);
3349 de712f94 Avi Kivity
    case 2: return lduw_p(ptr);
3350 de712f94 Avi Kivity
    case 4: return ldl_p(ptr);
3351 de712f94 Avi Kivity
    default: abort();
3352 de712f94 Avi Kivity
    }
3353 56384e8b Andreas Färber
}
3354 56384e8b Andreas Färber
3355 de712f94 Avi Kivity
static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3356 de712f94 Avi Kivity
                              uint64_t value, unsigned size)
3357 56384e8b Andreas Färber
{
3358 56384e8b Andreas Färber
    ram_addr_t raddr = addr;
3359 56384e8b Andreas Färber
    void *ptr = qemu_get_ram_ptr(raddr);
3360 de712f94 Avi Kivity
    switch (size) {
3361 de712f94 Avi Kivity
    case 1: return stb_p(ptr, value);
3362 de712f94 Avi Kivity
    case 2: return stw_p(ptr, value);
3363 de712f94 Avi Kivity
    case 4: return stl_p(ptr, value);
3364 de712f94 Avi Kivity
    default: abort();
3365 de712f94 Avi Kivity
    }
3366 56384e8b Andreas Fรคrber
}
3367 56384e8b Andreas Fรคrber
3368 de712f94 Avi Kivity
static const MemoryRegionOps subpage_ram_ops = {
3369 de712f94 Avi Kivity
    .read = subpage_ram_read,
3370 de712f94 Avi Kivity
    .write = subpage_ram_write,
3371 de712f94 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
3372 56384e8b Andreas Färber
};
3373 56384e8b Andreas Färber
3374 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3375 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset)
3376 db7b5426 blueswir1
{
3377 db7b5426 blueswir1
    int idx, eidx;
3378 db7b5426 blueswir1
3379 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3380 db7b5426 blueswir1
        return -1;
3381 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
3382 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
3383 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3384 0bf9e31a Blue Swirl
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3385 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
3386 db7b5426 blueswir1
#endif
3387 0e0df1e2 Avi Kivity
    if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
3388 de712f94 Avi Kivity
        memory = io_mem_subpage_ram.ram_addr;
3389 56384e8b Andreas Färber
    }
3390 11c7ef0c Avi Kivity
    memory &= IO_MEM_NB_ENTRIES - 1;
3391 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
3392 f6405247 Richard Henderson
        mmio->sub_io_index[idx] = memory;
3393 f6405247 Richard Henderson
        mmio->region_offset[idx] = region_offset;
3394 db7b5426 blueswir1
    }
3395 db7b5426 blueswir1
3396 db7b5426 blueswir1
    return 0;
3397 db7b5426 blueswir1
}
3398 db7b5426 blueswir1
3399 f6405247 Richard Henderson
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3400 f6405247 Richard Henderson
                                ram_addr_t orig_memory,
3401 f6405247 Richard Henderson
                                ram_addr_t region_offset)
3402 db7b5426 blueswir1
{
3403 c227f099 Anthony Liguori
    subpage_t *mmio;
3404 db7b5426 blueswir1
    int subpage_memory;
3405 db7b5426 blueswir1
3406 7267c094 Anthony Liguori
    mmio = g_malloc0(sizeof(subpage_t));
3407 1eec614b aliguori
3408 1eec614b aliguori
    mmio->base = base;
3409 70c68e44 Avi Kivity
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3410 70c68e44 Avi Kivity
                          "subpage", TARGET_PAGE_SIZE);
3411 b3b00c78 Avi Kivity
    mmio->iomem.subpage = true;
3412 70c68e44 Avi Kivity
    subpage_memory = mmio->iomem.ram_addr;
3413 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3414 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3415 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3416 db7b5426 blueswir1
#endif
3417 b3b00c78 Avi Kivity
    *phys = subpage_memory;
3418 f6405247 Richard Henderson
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3419 db7b5426 blueswir1
3420 db7b5426 blueswir1
    return mmio;
3421 db7b5426 blueswir1
}
3422 db7b5426 blueswir1
3423 88715657 aliguori
static int get_free_io_mem_idx(void)
3424 88715657 aliguori
{
3425 88715657 aliguori
    int i;
3426 88715657 aliguori
3427 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3428 88715657 aliguori
        if (!io_mem_used[i]) {
3429 88715657 aliguori
            io_mem_used[i] = 1;
3430 88715657 aliguori
            return i;
3431 88715657 aliguori
        }
3432 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3433 88715657 aliguori
    return -1;
3434 88715657 aliguori
}
3435 88715657 aliguori
3436 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3437 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3438 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3439 3ee89922 blueswir1
   If io_index is non-zero, the corresponding io zone is
3440 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3441 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(); -1 is
3442 4254fab8 blueswir1
   returned on error. */
3443 a621f38d Avi Kivity
static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
3444 33417e70 bellard
{
3445 33417e70 bellard
    if (io_index <= 0) {
3446 88715657 aliguori
        io_index = get_free_io_mem_idx();
3447 88715657 aliguori
        if (io_index == -1)
3448 88715657 aliguori
            return io_index;
3449 33417e70 bellard
    } else {
3450 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3451 33417e70 bellard
            return -1;
3452 33417e70 bellard
    }
3453 b5ff1b31 bellard
3454 a621f38d Avi Kivity
    io_mem_region[io_index] = mr;
3455 f6405247 Richard Henderson
3456 11c7ef0c Avi Kivity
    return io_index;
3457 33417e70 bellard
}
3458 61382a50 bellard
3459 a621f38d Avi Kivity
int cpu_register_io_memory(MemoryRegion *mr)
3460 1eed09cb Avi Kivity
{
3461 a621f38d Avi Kivity
    return cpu_register_io_memory_fixed(0, mr);
3462 1eed09cb Avi Kivity
}
3463 1eed09cb Avi Kivity
3464 11c7ef0c Avi Kivity
void cpu_unregister_io_memory(int io_index)
3465 88715657 aliguori
{
3466 a621f38d Avi Kivity
    io_mem_region[io_index] = NULL;
3467 88715657 aliguori
    io_mem_used[io_index] = 0;
3468 88715657 aliguori
}
3469 88715657 aliguori
3470 e9179ce1 Avi Kivity
static void io_mem_init(void)
3471 e9179ce1 Avi Kivity
{
3472 e9179ce1 Avi Kivity
    int i;
3473 e9179ce1 Avi Kivity
3474 0e0df1e2 Avi Kivity
    /* Must be first: */
3475 0e0df1e2 Avi Kivity
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3476 0e0df1e2 Avi Kivity
    assert(io_mem_ram.ram_addr == 0);
3477 0e0df1e2 Avi Kivity
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3478 0e0df1e2 Avi Kivity
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3479 0e0df1e2 Avi Kivity
                          "unassigned", UINT64_MAX);
3480 0e0df1e2 Avi Kivity
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3481 0e0df1e2 Avi Kivity
                          "notdirty", UINT64_MAX);
3482 de712f94 Avi Kivity
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3483 de712f94 Avi Kivity
                          "subpage-ram", UINT64_MAX);
3484 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3485 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3486 e9179ce1 Avi Kivity
3487 1ec9b909 Avi Kivity
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3488 1ec9b909 Avi Kivity
                          "watch", UINT64_MAX);
3489 e9179ce1 Avi Kivity
}
3490 e9179ce1 Avi Kivity
3491 62152b8a Avi Kivity
static void memory_map_init(void)
3492 62152b8a Avi Kivity
{
3493 7267c094 Anthony Liguori
    system_memory = g_malloc(sizeof(*system_memory));
3494 8417cebf Avi Kivity
    memory_region_init(system_memory, "system", INT64_MAX);
3495 62152b8a Avi Kivity
    set_system_memory_map(system_memory);
3496 309cb471 Avi Kivity
3497 7267c094 Anthony Liguori
    system_io = g_malloc(sizeof(*system_io));
3498 309cb471 Avi Kivity
    memory_region_init(system_io, "io", 65536);
3499 309cb471 Avi Kivity
    set_system_io_map(system_io);
3500 62152b8a Avi Kivity
}
3501 62152b8a Avi Kivity
3502 62152b8a Avi Kivity
MemoryRegion *get_system_memory(void)
3503 62152b8a Avi Kivity
{
3504 62152b8a Avi Kivity
    return system_memory;
3505 62152b8a Avi Kivity
}
3506 62152b8a Avi Kivity
3507 309cb471 Avi Kivity
MemoryRegion *get_system_io(void)
3508 309cb471 Avi Kivity
{
3509 309cb471 Avi Kivity
    return system_io;
3510 309cb471 Avi Kivity
}
3511 309cb471 Avi Kivity
3512 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3513 e2eef170 pbrook
3514 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3515 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3516 a68fe89c Paul Brook
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3517 a68fe89c Paul Brook
                        uint8_t *buf, int len, int is_write)
3518 13eb76e0 bellard
{
3519 13eb76e0 bellard
    int l, flags;
3520 13eb76e0 bellard
    target_ulong page;
3521 53a5960a pbrook
    void * p;
3522 13eb76e0 bellard
3523 13eb76e0 bellard
    while (len > 0) {
3524 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3525 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3526 13eb76e0 bellard
        if (l > len)
3527 13eb76e0 bellard
            l = len;
3528 13eb76e0 bellard
        flags = page_get_flags(page);
3529 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
3530 a68fe89c Paul Brook
            return -1;
3531 13eb76e0 bellard
        if (is_write) {
3532 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3533 a68fe89c Paul Brook
                return -1;
3534 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3535 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3536 a68fe89c Paul Brook
                return -1;
3537 72fb7daa aurel32
            memcpy(p, buf, l);
3538 72fb7daa aurel32
            unlock_user(p, addr, l);
3539 13eb76e0 bellard
        } else {
3540 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3541 a68fe89c Paul Brook
                return -1;
3542 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3543 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3544 a68fe89c Paul Brook
                return -1;
3545 72fb7daa aurel32
            memcpy(buf, p, l);
3546 5b257578 aurel32
            unlock_user(p, addr, 0);
3547 13eb76e0 bellard
        }
3548 13eb76e0 bellard
        len -= l;
3549 13eb76e0 bellard
        buf += l;
3550 13eb76e0 bellard
        addr += l;
3551 13eb76e0 bellard
    }
3552 a68fe89c Paul Brook
    return 0;
3553 13eb76e0 bellard
}
3554 8df1cd07 bellard
3555 13eb76e0 bellard
#else
3556 c227f099 Anthony Liguori
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3557 13eb76e0 bellard
                            int len, int is_write)
3558 13eb76e0 bellard
{
3559 13eb76e0 bellard
    int l, io_index;
3560 13eb76e0 bellard
    uint8_t *ptr;
3561 13eb76e0 bellard
    uint32_t val;
3562 c227f099 Anthony Liguori
    target_phys_addr_t page;
3563 8ca5692d Anthony PERARD
    ram_addr_t pd;
3564 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
3565 3b46e624 ths
3566 13eb76e0 bellard
    while (len > 0) {
3567 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3568 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3569 13eb76e0 bellard
        if (l > len)
3570 13eb76e0 bellard
            l = len;
3571 92e873b9 bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3572 f1f6e3b8 Avi Kivity
        pd = p.phys_offset;
3573 3b46e624 ths
3574 13eb76e0 bellard
        if (is_write) {
3575 0e0df1e2 Avi Kivity
            if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
3576 f1f6e3b8 Avi Kivity
                target_phys_addr_t addr1;
3577 11c7ef0c Avi Kivity
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3578 f1f6e3b8 Avi Kivity
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3579 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
3580 6a00d601 bellard
                   potential bugs */
3581 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3582 1c213d19 bellard
                    /* 32 bit write access */
3583 c27004ec bellard
                    val = ldl_p(buf);
3584 acbbec5d Avi Kivity
                    io_mem_write(io_index, addr1, val, 4);
3585 13eb76e0 bellard
                    l = 4;
3586 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3587 1c213d19 bellard
                    /* 16 bit write access */
3588 c27004ec bellard
                    val = lduw_p(buf);
3589 acbbec5d Avi Kivity
                    io_mem_write(io_index, addr1, val, 2);
3590 13eb76e0 bellard
                    l = 2;
3591 13eb76e0 bellard
                } else {
3592 1c213d19 bellard
                    /* 8 bit write access */
3593 c27004ec bellard
                    val = ldub_p(buf);
3594 acbbec5d Avi Kivity
                    io_mem_write(io_index, addr1, val, 1);
3595 13eb76e0 bellard
                    l = 1;
3596 13eb76e0 bellard
                }
3597 13eb76e0 bellard
            } else {
3598 8ca5692d Anthony PERARD
                ram_addr_t addr1;
3599 b448f2f3 bellard
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3600 13eb76e0 bellard
                /* RAM case */
3601 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
3602 13eb76e0 bellard
                memcpy(ptr, buf, l);
3603 3a7d929e bellard
                if (!cpu_physical_memory_is_dirty(addr1)) {
3604 3a7d929e bellard
                    /* invalidate code */
3605 3a7d929e bellard
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3606 3a7d929e bellard
                    /* set dirty bit */
3607 f7c11b53 Yoshiaki Tamura
                    cpu_physical_memory_set_dirty_flags(
3608 f7c11b53 Yoshiaki Tamura
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
3609 3a7d929e bellard
                }
3610 050a0ddf Anthony PERARD
                qemu_put_ram_ptr(ptr);
3611 13eb76e0 bellard
            }
3612 13eb76e0 bellard
        } else {
3613 1d393fa2 Avi Kivity
            if (!is_ram_rom_romd(pd)) {
3614 f1f6e3b8 Avi Kivity
                target_phys_addr_t addr1;
3615 13eb76e0 bellard
                /* I/O case */
3616 11c7ef0c Avi Kivity
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3617 f1f6e3b8 Avi Kivity
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3618 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3619 13eb76e0 bellard
                    /* 32 bit read access */
3620 acbbec5d Avi Kivity
                    val = io_mem_read(io_index, addr1, 4);
3621 c27004ec bellard
                    stl_p(buf, val);
3622 13eb76e0 bellard
                    l = 4;
3623 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3624 13eb76e0 bellard
                    /* 16 bit read access */
3625 acbbec5d Avi Kivity
                    val = io_mem_read(io_index, addr1, 2);
3626 c27004ec bellard
                    stw_p(buf, val);
3627 13eb76e0 bellard
                    l = 2;
3628 13eb76e0 bellard
                } else {
3629 1c213d19 bellard
                    /* 8 bit read access */
3630 acbbec5d Avi Kivity
                    val = io_mem_read(io_index, addr1, 1);
3631 c27004ec bellard
                    stb_p(buf, val);
3632 13eb76e0 bellard
                    l = 1;
3633 13eb76e0 bellard
                }
3634 13eb76e0 bellard
            } else {
3635 13eb76e0 bellard
                /* RAM case */
3636 050a0ddf Anthony PERARD
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3637 050a0ddf Anthony PERARD
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3638 050a0ddf Anthony PERARD
                qemu_put_ram_ptr(ptr);
3639 13eb76e0 bellard
            }
3640 13eb76e0 bellard
        }
3641 13eb76e0 bellard
        len -= l;
3642 13eb76e0 bellard
        buf += l;
3643 13eb76e0 bellard
        addr += l;
3644 13eb76e0 bellard
    }
3645 13eb76e0 bellard
}
3646 8df1cd07 bellard
3647 d0ecd2aa bellard
/* used for ROM loading: can write in RAM and ROM */
3648 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3649 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3650 d0ecd2aa bellard
{
3651 d0ecd2aa bellard
    int l;
3652 d0ecd2aa bellard
    uint8_t *ptr;
3653 c227f099 Anthony Liguori
    target_phys_addr_t page;
3654 d0ecd2aa bellard
    unsigned long pd;
3655 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
3656 3b46e624 ths
3657 d0ecd2aa bellard
    while (len > 0) {
3658 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3659 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3660 d0ecd2aa bellard
        if (l > len)
3661 d0ecd2aa bellard
            l = len;
3662 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3663 f1f6e3b8 Avi Kivity
        pd = p.phys_offset;
3664 3b46e624 ths
3665 1d393fa2 Avi Kivity
        if (!is_ram_rom_romd(pd)) {
3666 d0ecd2aa bellard
            /* do nothing */
3667 d0ecd2aa bellard
        } else {
3668 d0ecd2aa bellard
            unsigned long addr1;
3669 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3670 d0ecd2aa bellard
            /* ROM/RAM case */
3671 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3672 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3673 050a0ddf Anthony PERARD
            qemu_put_ram_ptr(ptr);
3674 d0ecd2aa bellard
        }
3675 d0ecd2aa bellard
        len -= l;
3676 d0ecd2aa bellard
        buf += l;
3677 d0ecd2aa bellard
        addr += l;
3678 d0ecd2aa bellard
    }
3679 d0ecd2aa bellard
}
3680 d0ecd2aa bellard
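/* Illustrative sketch, not part of the original file: a firmware loader
 * using the ROM-capable writer above.  The load address and function name
 * are hypothetical, defined only for this example. */
#define EXAMPLE_BIOS_BASE 0xfffc0000

static void example_install_firmware(const uint8_t *blob, int size)
{
    /* A plain cpu_physical_memory_rw() write to a ROM page would be
       discarded by rom_mem_ops; this writer stores into the backing RAM. */
    cpu_physical_memory_write_rom(EXAMPLE_BIOS_BASE, blob, size);
}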
3681 6d16c2f8 aliguori
typedef struct {
3682 6d16c2f8 aliguori
    void *buffer;
3683 c227f099 Anthony Liguori
    target_phys_addr_t addr;
3684 c227f099 Anthony Liguori
    target_phys_addr_t len;
3685 6d16c2f8 aliguori
} BounceBuffer;
3686 6d16c2f8 aliguori
3687 6d16c2f8 aliguori
static BounceBuffer bounce;
3688 6d16c2f8 aliguori
3689 ba223c29 aliguori
typedef struct MapClient {
3690 ba223c29 aliguori
    void *opaque;
3691 ba223c29 aliguori
    void (*callback)(void *opaque);
3692 72cf2d4f Blue Swirl
    QLIST_ENTRY(MapClient) link;
3693 ba223c29 aliguori
} MapClient;
3694 ba223c29 aliguori
3695 72cf2d4f Blue Swirl
static QLIST_HEAD(map_client_list, MapClient) map_client_list
3696 72cf2d4f Blue Swirl
    = QLIST_HEAD_INITIALIZER(map_client_list);
3697 ba223c29 aliguori
3698 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3699 ba223c29 aliguori
{
3700 7267c094 Anthony Liguori
    MapClient *client = g_malloc(sizeof(*client));
3701 ba223c29 aliguori
3702 ba223c29 aliguori
    client->opaque = opaque;
3703 ba223c29 aliguori
    client->callback = callback;
3704 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3705 ba223c29 aliguori
    return client;
3706 ba223c29 aliguori
}
3707 ba223c29 aliguori
3708 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3709 ba223c29 aliguori
{
3710 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3711 ba223c29 aliguori
3712 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3713 7267c094 Anthony Liguori
    g_free(client);
3714 ba223c29 aliguori
}
3715 ba223c29 aliguori
3716 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3717 ba223c29 aliguori
{
3718 ba223c29 aliguori
    MapClient *client;
3719 ba223c29 aliguori
3720 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3721 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3722 ba223c29 aliguori
        client->callback(client->opaque);
3723 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3724 ba223c29 aliguori
    }
3725 ba223c29 aliguori
}
3726 ba223c29 aliguori
3727 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3728 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3729 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3730 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3731 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
3732 ba223c29 aliguori
 * likely to succeed.
3733 6d16c2f8 aliguori
 */
3734 c227f099 Anthony Liguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
3735 c227f099 Anthony Liguori
                              target_phys_addr_t *plen,
3736 6d16c2f8 aliguori
                              int is_write)
3737 6d16c2f8 aliguori
{
3738 c227f099 Anthony Liguori
    target_phys_addr_t len = *plen;
3739 38bee5dc Stefano Stabellini
    target_phys_addr_t todo = 0;
3740 6d16c2f8 aliguori
    int l;
3741 c227f099 Anthony Liguori
    target_phys_addr_t page;
3742 6d16c2f8 aliguori
    unsigned long pd;
3743 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
3744 f15fbc4b Anthony PERARD
    ram_addr_t raddr = RAM_ADDR_MAX;
3745 8ab934f9 Stefano Stabellini
    ram_addr_t rlen;
3746 8ab934f9 Stefano Stabellini
    void *ret;
3747 6d16c2f8 aliguori
3748 6d16c2f8 aliguori
    while (len > 0) {
3749 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
3750 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
3751 6d16c2f8 aliguori
        if (l > len)
3752 6d16c2f8 aliguori
            l = len;
3753 6d16c2f8 aliguori
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3754 f1f6e3b8 Avi Kivity
        pd = p.phys_offset;
3755 6d16c2f8 aliguori
3756 0e0df1e2 Avi Kivity
        if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
3757 38bee5dc Stefano Stabellini
            if (todo || bounce.buffer) {
3758 6d16c2f8 aliguori
                break;
3759 6d16c2f8 aliguori
            }
3760 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3761 6d16c2f8 aliguori
            bounce.addr = addr;
3762 6d16c2f8 aliguori
            bounce.len = l;
3763 6d16c2f8 aliguori
            if (!is_write) {
3764 54f7b4a3 Stefan Weil
                cpu_physical_memory_read(addr, bounce.buffer, l);
3765 6d16c2f8 aliguori
            }
3766 38bee5dc Stefano Stabellini
3767 38bee5dc Stefano Stabellini
            *plen = l;
3768 38bee5dc Stefano Stabellini
            return bounce.buffer;
3769 6d16c2f8 aliguori
        }
3770 8ab934f9 Stefano Stabellini
        if (!todo) {
3771 8ab934f9 Stefano Stabellini
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3772 8ab934f9 Stefano Stabellini
        }
3773 6d16c2f8 aliguori
3774 6d16c2f8 aliguori
        len -= l;
3775 6d16c2f8 aliguori
        addr += l;
3776 38bee5dc Stefano Stabellini
        todo += l;
3777 6d16c2f8 aliguori
    }
3778 8ab934f9 Stefano Stabellini
    rlen = todo;
3779 8ab934f9 Stefano Stabellini
    ret = qemu_ram_ptr_length(raddr, &rlen);
3780 8ab934f9 Stefano Stabellini
    *plen = rlen;
3781 8ab934f9 Stefano Stabellini
    return ret;
3782 6d16c2f8 aliguori
}
3783 6d16c2f8 aliguori
3784 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3785 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3786 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
3787 6d16c2f8 aliguori
 */
3788 c227f099 Anthony Liguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3789 c227f099 Anthony Liguori
                               int is_write, target_phys_addr_t access_len)
3790 6d16c2f8 aliguori
{
3791 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
3792 6d16c2f8 aliguori
        if (is_write) {
3793 e890261f Marcelo Tosatti
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3794 6d16c2f8 aliguori
            while (access_len) {
3795 6d16c2f8 aliguori
                unsigned l;
3796 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
3797 6d16c2f8 aliguori
                if (l > access_len)
3798 6d16c2f8 aliguori
                    l = access_len;
3799 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
3800 6d16c2f8 aliguori
                    /* invalidate code */
3801 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3802 6d16c2f8 aliguori
                    /* set dirty bit */
3803 f7c11b53 Yoshiaki Tamura
                    cpu_physical_memory_set_dirty_flags(
3804 f7c11b53 Yoshiaki Tamura
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
3805 6d16c2f8 aliguori
                }
3806 6d16c2f8 aliguori
                addr1 += l;
3807 6d16c2f8 aliguori
                access_len -= l;
3808 6d16c2f8 aliguori
            }
3809 6d16c2f8 aliguori
        }
3810 868bb33f Jan Kiszka
        if (xen_enabled()) {
3811 e41d7c69 Jan Kiszka
            xen_invalidate_map_cache_entry(buffer);
3812 050a0ddf Anthony PERARD
        }
3813 6d16c2f8 aliguori
        return;
3814 6d16c2f8 aliguori
    }
3815 6d16c2f8 aliguori
    if (is_write) {
3816 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3817 6d16c2f8 aliguori
    }
3818 f8a83245 Herve Poussineau
    qemu_vfree(bounce.buffer);
3819 6d16c2f8 aliguori
    bounce.buffer = NULL;
3820 ba223c29 aliguori
    cpu_notify_map_clients();
3821 6d16c2f8 aliguori
}
3822 d0ecd2aa bellard
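/* Illustrative sketch, not part of the original file: the calling pattern
 * the map/unmap pair above is designed for.  When the single bounce buffer
 * is busy, cpu_physical_memory_map() can return NULL; a map client callback
 * re-drives the transfer once it frees.  Both helper names are
 * hypothetical. */
static void example_retry(void *opaque)
{
    /* Re-issue the deferred transfer here. */
}

static void example_dma_write(target_phys_addr_t addr,
                              const uint8_t *data, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* Resources exhausted: ask to be called back, then retry. */
        cpu_register_map_client(NULL, example_retry);
        return;
    }
    memcpy(host, data, plen);   /* plen may have been reduced */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}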
3823 8df1cd07 bellard
/* warning: addr must be aligned */
3824 1e78bcc1 Alexander Graf
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3825 1e78bcc1 Alexander Graf
                                         enum device_endian endian)
3826 8df1cd07 bellard
{
3827 8df1cd07 bellard
    int io_index;
3828 8df1cd07 bellard
    uint8_t *ptr;
3829 8df1cd07 bellard
    uint32_t val;
3830 8df1cd07 bellard
    unsigned long pd;
3831 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
3832 8df1cd07 bellard
3833 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3834 f1f6e3b8 Avi Kivity
    pd = p.phys_offset;
3835 3b46e624 ths
3836 1d393fa2 Avi Kivity
    if (!is_ram_rom_romd(pd)) {
3837 8df1cd07 bellard
        /* I/O case */
3838 11c7ef0c Avi Kivity
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3839 f1f6e3b8 Avi Kivity
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3840 acbbec5d Avi Kivity
        val = io_mem_read(io_index, addr, 4);
3841 1e78bcc1 Alexander Graf
#if defined(TARGET_WORDS_BIGENDIAN)
3842 1e78bcc1 Alexander Graf
        if (endian == DEVICE_LITTLE_ENDIAN) {
3843 1e78bcc1 Alexander Graf
            val = bswap32(val);
3844 1e78bcc1 Alexander Graf
        }
3845 1e78bcc1 Alexander Graf
#else
3846 1e78bcc1 Alexander Graf
        if (endian == DEVICE_BIG_ENDIAN) {
3847 1e78bcc1 Alexander Graf
            val = bswap32(val);
3848 1e78bcc1 Alexander Graf
        }
3849 1e78bcc1 Alexander Graf
#endif
3850 8df1cd07 bellard
    } else {
3851 8df1cd07 bellard
        /* RAM case */
3852 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3853 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
3854 1e78bcc1 Alexander Graf
        switch (endian) {
3855 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
3856 1e78bcc1 Alexander Graf
            val = ldl_le_p(ptr);
3857 1e78bcc1 Alexander Graf
            break;
3858 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
3859 1e78bcc1 Alexander Graf
            val = ldl_be_p(ptr);
3860 1e78bcc1 Alexander Graf
            break;
3861 1e78bcc1 Alexander Graf
        default:
3862 1e78bcc1 Alexander Graf
            val = ldl_p(ptr);
3863 1e78bcc1 Alexander Graf
            break;
3864 1e78bcc1 Alexander Graf
        }
3865 8df1cd07 bellard
    }
3866 8df1cd07 bellard
    return val;
3867 8df1cd07 bellard
}
3868 8df1cd07 bellard
3869 1e78bcc1 Alexander Graf
uint32_t ldl_phys(target_phys_addr_t addr)
3870 1e78bcc1 Alexander Graf
{
3871 1e78bcc1 Alexander Graf
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3872 1e78bcc1 Alexander Graf
}
3873 1e78bcc1 Alexander Graf
3874 1e78bcc1 Alexander Graf
uint32_t ldl_le_phys(target_phys_addr_t addr)
3875 1e78bcc1 Alexander Graf
{
3876 1e78bcc1 Alexander Graf
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3877 1e78bcc1 Alexander Graf
}
3878 1e78bcc1 Alexander Graf
3879 1e78bcc1 Alexander Graf
uint32_t ldl_be_phys(target_phys_addr_t addr)
3880 1e78bcc1 Alexander Graf
{
3881 1e78bcc1 Alexander Graf
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3882 1e78bcc1 Alexander Graf
}
3883 1e78bcc1 Alexander Graf
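/* Illustrative sketch, not part of the original file: why the _le/_be
 * variants exist.  A model of a little-endian device can read a 32-bit
 * register image with ldl_le_phys() and get a host-order value however
 * TARGET_WORDS_BIGENDIAN is set; the function name is hypothetical. */
static uint32_t example_read_le_reg(target_phys_addr_t base, int index)
{
    return ldl_le_phys(base + index * 4);
}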
3884 84b7b8e7 bellard
/* warning: addr must be aligned */
3885 1e78bcc1 Alexander Graf
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3886 1e78bcc1 Alexander Graf
                                         enum device_endian endian)
3887 84b7b8e7 bellard
{
3888 84b7b8e7 bellard
    int io_index;
3889 84b7b8e7 bellard
    uint8_t *ptr;
3890 84b7b8e7 bellard
    uint64_t val;
3891 84b7b8e7 bellard
    unsigned long pd;
3892 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
3893 84b7b8e7 bellard
3894 84b7b8e7 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3895 f1f6e3b8 Avi Kivity
    pd = p.phys_offset;
3896 3b46e624 ths
3897 1d393fa2 Avi Kivity
    if (!is_ram_rom_romd(pd)) {
3898 84b7b8e7 bellard
        /* I/O case */
3899 11c7ef0c Avi Kivity
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3900 f1f6e3b8 Avi Kivity
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3901 1e78bcc1 Alexander Graf
3902 1e78bcc1 Alexander Graf
        /* XXX This is broken when device endian != cpu endian.
3903 1e78bcc1 Alexander Graf
               Fix and add "endian" variable check */
3904 84b7b8e7 bellard
#ifdef TARGET_WORDS_BIGENDIAN
3905 acbbec5d Avi Kivity
        val = io_mem_read(io_index, addr, 4) << 32;
3906 acbbec5d Avi Kivity
        val |= io_mem_read(io_index, addr + 4, 4);
3907 84b7b8e7 bellard
#else
3908 acbbec5d Avi Kivity
        val = io_mem_read(io_index, addr, 4);
3909 acbbec5d Avi Kivity
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
3910 84b7b8e7 bellard
#endif
3911 84b7b8e7 bellard
    } else {
3912 84b7b8e7 bellard
        /* RAM case */
3913 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3914 84b7b8e7 bellard
            (addr & ~TARGET_PAGE_MASK);
3915 1e78bcc1 Alexander Graf
        switch (endian) {
3916 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
3917 1e78bcc1 Alexander Graf
            val = ldq_le_p(ptr);
3918 1e78bcc1 Alexander Graf
            break;
3919 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
3920 1e78bcc1 Alexander Graf
            val = ldq_be_p(ptr);
3921 1e78bcc1 Alexander Graf
            break;
3922 1e78bcc1 Alexander Graf
        default:
3923 1e78bcc1 Alexander Graf
            val = ldq_p(ptr);
3924 1e78bcc1 Alexander Graf
            break;
3925 1e78bcc1 Alexander Graf
        }
3926 84b7b8e7 bellard
    }
3927 84b7b8e7 bellard
    return val;
3928 84b7b8e7 bellard
}
3929 84b7b8e7 bellard
3930 1e78bcc1 Alexander Graf
uint64_t ldq_phys(target_phys_addr_t addr)
3931 1e78bcc1 Alexander Graf
{
3932 1e78bcc1 Alexander Graf
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3933 1e78bcc1 Alexander Graf
}
3934 1e78bcc1 Alexander Graf
3935 1e78bcc1 Alexander Graf
uint64_t ldq_le_phys(target_phys_addr_t addr)
3936 1e78bcc1 Alexander Graf
{
3937 1e78bcc1 Alexander Graf
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3938 1e78bcc1 Alexander Graf
}
3939 1e78bcc1 Alexander Graf
3940 1e78bcc1 Alexander Graf
uint64_t ldq_be_phys(target_phys_addr_t addr)
3941 1e78bcc1 Alexander Graf
{
3942 1e78bcc1 Alexander Graf
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3943 1e78bcc1 Alexander Graf
}
3944 1e78bcc1 Alexander Graf
3945 aab33094 bellard
/* XXX: optimize */
3946 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
3947 aab33094 bellard
{
3948 aab33094 bellard
    uint8_t val;
3949 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3950 aab33094 bellard
    return val;
3951 aab33094 bellard
}
3952 aab33094 bellard
3953 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
3954 1e78bcc1 Alexander Graf
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3955 1e78bcc1 Alexander Graf
                                          enum device_endian endian)
3956 aab33094 bellard
{
3957 733f0b02 Michael S. Tsirkin
    int io_index;
3958 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
3959 733f0b02 Michael S. Tsirkin
    uint64_t val;
3960 733f0b02 Michael S. Tsirkin
    unsigned long pd;
3961 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
3962 733f0b02 Michael S. Tsirkin
3963 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3964 f1f6e3b8 Avi Kivity
    pd = p.phys_offset;
3965 733f0b02 Michael S. Tsirkin
3966 1d393fa2 Avi Kivity
    if (!is_ram_rom_romd(pd)) {
3967 733f0b02 Michael S. Tsirkin
        /* I/O case */
3968 11c7ef0c Avi Kivity
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3969 f1f6e3b8 Avi Kivity
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3970 acbbec5d Avi Kivity
        val = io_mem_read(io_index, addr, 2);
3971 1e78bcc1 Alexander Graf
#if defined(TARGET_WORDS_BIGENDIAN)
3972 1e78bcc1 Alexander Graf
        if (endian == DEVICE_LITTLE_ENDIAN) {
3973 1e78bcc1 Alexander Graf
            val = bswap16(val);
3974 1e78bcc1 Alexander Graf
        }
3975 1e78bcc1 Alexander Graf
#else
3976 1e78bcc1 Alexander Graf
        if (endian == DEVICE_BIG_ENDIAN) {
3977 1e78bcc1 Alexander Graf
            val = bswap16(val);
3978 1e78bcc1 Alexander Graf
        }
3979 1e78bcc1 Alexander Graf
#endif
3980 733f0b02 Michael S. Tsirkin
    } else {
3981 733f0b02 Michael S. Tsirkin
        /* RAM case */
3982 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3983 733f0b02 Michael S. Tsirkin
            (addr & ~TARGET_PAGE_MASK);
3984 1e78bcc1 Alexander Graf
        switch (endian) {
3985 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
3986 1e78bcc1 Alexander Graf
            val = lduw_le_p(ptr);
3987 1e78bcc1 Alexander Graf
            break;
3988 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
3989 1e78bcc1 Alexander Graf
            val = lduw_be_p(ptr);
3990 1e78bcc1 Alexander Graf
            break;
3991 1e78bcc1 Alexander Graf
        default:
3992 1e78bcc1 Alexander Graf
            val = lduw_p(ptr);
3993 1e78bcc1 Alexander Graf
            break;
3994 1e78bcc1 Alexander Graf
        }
3995 733f0b02 Michael S. Tsirkin
    }
3996 733f0b02 Michael S. Tsirkin
    return val;
3997 aab33094 bellard
}
3998 aab33094 bellard
3999 1e78bcc1 Alexander Graf
uint32_t lduw_phys(target_phys_addr_t addr)
4000 1e78bcc1 Alexander Graf
{
4001 1e78bcc1 Alexander Graf
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4002 1e78bcc1 Alexander Graf
}
4003 1e78bcc1 Alexander Graf
4004 1e78bcc1 Alexander Graf
uint32_t lduw_le_phys(target_phys_addr_t addr)
4005 1e78bcc1 Alexander Graf
{
4006 1e78bcc1 Alexander Graf
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4007 1e78bcc1 Alexander Graf
}
4008 1e78bcc1 Alexander Graf
4009 1e78bcc1 Alexander Graf
uint32_t lduw_be_phys(target_phys_addr_t addr)
4010 1e78bcc1 Alexander Graf
{
4011 1e78bcc1 Alexander Graf
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4012 1e78bcc1 Alexander Graf
}
4013 1e78bcc1 Alexander Graf
4014 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not marked as dirty
4015 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
4016 8df1cd07 bellard
   bits are used to track modified PTEs */
4017 c227f099 Anthony Liguori
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4018 8df1cd07 bellard
{
4019 8df1cd07 bellard
    int io_index;
4020 8df1cd07 bellard
    uint8_t *ptr;
4021 8df1cd07 bellard
    unsigned long pd;
4022 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
4023 8df1cd07 bellard
4024 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4025 f1f6e3b8 Avi Kivity
    pd = p.phys_offset;
4026 3b46e624 ths
4027 0e0df1e2 Avi Kivity
    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4028 11c7ef0c Avi Kivity
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4029 f1f6e3b8 Avi Kivity
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4030 acbbec5d Avi Kivity
        io_mem_write(io_index, addr, val, 4);
4031 8df1cd07 bellard
    } else {
4032 74576198 aliguori
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4033 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
4034 8df1cd07 bellard
        stl_p(ptr, val);
4035 74576198 aliguori
4036 74576198 aliguori
        if (unlikely(in_migration)) {
4037 74576198 aliguori
            if (!cpu_physical_memory_is_dirty(addr1)) {
4038 74576198 aliguori
                /* invalidate code */
4039 74576198 aliguori
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4040 74576198 aliguori
                /* set dirty bit */
4041 f7c11b53 Yoshiaki Tamura
                cpu_physical_memory_set_dirty_flags(
4042 f7c11b53 Yoshiaki Tamura
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
4043 74576198 aliguori
            }
4044 74576198 aliguori
        }
4045 8df1cd07 bellard
    }
4046 8df1cd07 bellard
}
4047 8df1cd07 bellard
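/* Illustrative sketch, not part of the original file: the PTE use case the
 * comment above describes.  A page-table walker emulating a hardware
 * "accessed" bit updates the guest PTE without flagging the page in the
 * dirty bitmap; the bit value and helper name are hypothetical. */
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
}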
4048 c227f099 Anthony Liguori
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4049 bc98a7ef j_mayer
{
4050 bc98a7ef j_mayer
    int io_index;
4051 bc98a7ef j_mayer
    uint8_t *ptr;
4052 bc98a7ef j_mayer
    unsigned long pd;
4053 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
4054 bc98a7ef j_mayer
4055 bc98a7ef j_mayer
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4056 f1f6e3b8 Avi Kivity
    pd = p.phys_offset;
4057 3b46e624 ths
4058 0e0df1e2 Avi Kivity
    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4059 11c7ef0c Avi Kivity
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4060 f1f6e3b8 Avi Kivity
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4061 bc98a7ef j_mayer
#ifdef TARGET_WORDS_BIGENDIAN
4062 acbbec5d Avi Kivity
        io_mem_write(io_index, addr, val >> 32, 4);
4063 acbbec5d Avi Kivity
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
4064 bc98a7ef j_mayer
#else
4065 acbbec5d Avi Kivity
        io_mem_write(io_index, addr, (uint32_t)val, 4);
4066 acbbec5d Avi Kivity
        io_mem_write(io_index, addr + 4, val >> 32, 4);
4067 bc98a7ef j_mayer
#endif
4068 bc98a7ef j_mayer
    } else {
4069 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4070 bc98a7ef j_mayer
            (addr & ~TARGET_PAGE_MASK);
4071 bc98a7ef j_mayer
        stq_p(ptr, val);
4072 bc98a7ef j_mayer
    }
4073 bc98a7ef j_mayer
}
4074 bc98a7ef j_mayer
4075 8df1cd07 bellard
/* warning: addr must be aligned */
4076 1e78bcc1 Alexander Graf
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4077 1e78bcc1 Alexander Graf
                                     enum device_endian endian)
4078 8df1cd07 bellard
{
4079 8df1cd07 bellard
    int io_index;
4080 8df1cd07 bellard
    uint8_t *ptr;
4081 8df1cd07 bellard
    unsigned long pd;
4082 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
4083 8df1cd07 bellard
4084 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4085 f1f6e3b8 Avi Kivity
    pd = p.phys_offset;
4086 3b46e624 ths
4087 0e0df1e2 Avi Kivity
    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4088 11c7ef0c Avi Kivity
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4089 f1f6e3b8 Avi Kivity
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4090 1e78bcc1 Alexander Graf
#if defined(TARGET_WORDS_BIGENDIAN)
4091 1e78bcc1 Alexander Graf
        if (endian == DEVICE_LITTLE_ENDIAN) {
4092 1e78bcc1 Alexander Graf
            val = bswap32(val);
4093 1e78bcc1 Alexander Graf
        }
4094 1e78bcc1 Alexander Graf
#else
4095 1e78bcc1 Alexander Graf
        if (endian == DEVICE_BIG_ENDIAN) {
4096 1e78bcc1 Alexander Graf
            val = bswap32(val);
4097 1e78bcc1 Alexander Graf
        }
4098 1e78bcc1 Alexander Graf
#endif
4099 acbbec5d Avi Kivity
        io_mem_write(io_index, addr, val, 4);
4100 8df1cd07 bellard
    } else {
4101 8df1cd07 bellard
        unsigned long addr1;
4102 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4103 8df1cd07 bellard
        /* RAM case */
4104 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
4105 1e78bcc1 Alexander Graf
        switch (endian) {
4106 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
4107 1e78bcc1 Alexander Graf
            stl_le_p(ptr, val);
4108 1e78bcc1 Alexander Graf
            break;
4109 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
4110 1e78bcc1 Alexander Graf
            stl_be_p(ptr, val);
4111 1e78bcc1 Alexander Graf
            break;
4112 1e78bcc1 Alexander Graf
        default:
4113 1e78bcc1 Alexander Graf
            stl_p(ptr, val);
4114 1e78bcc1 Alexander Graf
            break;
4115 1e78bcc1 Alexander Graf
        }
4116 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
4117 3a7d929e bellard
            /* invalidate code */
4118 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4119 3a7d929e bellard
            /* set dirty bit */
4120 f7c11b53 Yoshiaki Tamura
            cpu_physical_memory_set_dirty_flags(addr1,
4121 f7c11b53 Yoshiaki Tamura
                (0xff & ~CODE_DIRTY_FLAG));
4122 3a7d929e bellard
        }
4123 8df1cd07 bellard
    }
4124 8df1cd07 bellard
}
4125 8df1cd07 bellard
4126 1e78bcc1 Alexander Graf
void stl_phys(target_phys_addr_t addr, uint32_t val)
4127 1e78bcc1 Alexander Graf
{
4128 1e78bcc1 Alexander Graf
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4129 1e78bcc1 Alexander Graf
}
4130 1e78bcc1 Alexander Graf
4131 1e78bcc1 Alexander Graf
void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4132 1e78bcc1 Alexander Graf
{
4133 1e78bcc1 Alexander Graf
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4134 1e78bcc1 Alexander Graf
}
4135 1e78bcc1 Alexander Graf
4136 1e78bcc1 Alexander Graf
void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4137 1e78bcc1 Alexander Graf
{
4138 1e78bcc1 Alexander Graf
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4139 1e78bcc1 Alexander Graf
}
4140 1e78bcc1 Alexander Graf
4141 aab33094 bellard
/* XXX: optimize */
4142 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
4143 aab33094 bellard
{
4144 aab33094 bellard
    uint8_t v = val;
4145 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
4146 aab33094 bellard
}
4147 aab33094 bellard
4148 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
4149 1e78bcc1 Alexander Graf
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4150 1e78bcc1 Alexander Graf
                                     enum device_endian endian)
4151 aab33094 bellard
{
4152 733f0b02 Michael S. Tsirkin
    int io_index;
4153 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
4154 733f0b02 Michael S. Tsirkin
    unsigned long pd;
4155 f1f6e3b8 Avi Kivity
    PhysPageDesc p;
4156 733f0b02 Michael S. Tsirkin
4157 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4158 f1f6e3b8 Avi Kivity
    pd = p.phys_offset;
4159 733f0b02 Michael S. Tsirkin
4160 0e0df1e2 Avi Kivity
    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4161 11c7ef0c Avi Kivity
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4162 f1f6e3b8 Avi Kivity
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4163 1e78bcc1 Alexander Graf
#if defined(TARGET_WORDS_BIGENDIAN)
4164 1e78bcc1 Alexander Graf
        if (endian == DEVICE_LITTLE_ENDIAN) {
4165 1e78bcc1 Alexander Graf
            val = bswap16(val);
4166 1e78bcc1 Alexander Graf
        }
4167 1e78bcc1 Alexander Graf
#else
4168 1e78bcc1 Alexander Graf
        if (endian == DEVICE_BIG_ENDIAN) {
4169 1e78bcc1 Alexander Graf
            val = bswap16(val);
4170 1e78bcc1 Alexander Graf
        }
4171 1e78bcc1 Alexander Graf
#endif
4172 acbbec5d Avi Kivity
        io_mem_write(io_index, addr, val, 2);
4173 733f0b02 Michael S. Tsirkin
    } else {
4174 733f0b02 Michael S. Tsirkin
        unsigned long addr1;
4175 733f0b02 Michael S. Tsirkin
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4176 733f0b02 Michael S. Tsirkin
        /* RAM case */
4177 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(addr1);
4178 1e78bcc1 Alexander Graf
        switch (endian) {
4179 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
4180 1e78bcc1 Alexander Graf
            stw_le_p(ptr, val);
4181 1e78bcc1 Alexander Graf
            break;
4182 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
4183 1e78bcc1 Alexander Graf
            stw_be_p(ptr, val);
4184 1e78bcc1 Alexander Graf
            break;
4185 1e78bcc1 Alexander Graf
        default:
4186 1e78bcc1 Alexander Graf
            stw_p(ptr, val);
4187 1e78bcc1 Alexander Graf
            break;
4188 1e78bcc1 Alexander Graf
        }
4189 733f0b02 Michael S. Tsirkin
        if (!cpu_physical_memory_is_dirty(addr1)) {
4190 733f0b02 Michael S. Tsirkin
            /* invalidate code */
4191 733f0b02 Michael S. Tsirkin
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4192 733f0b02 Michael S. Tsirkin
            /* set dirty bit */
4193 733f0b02 Michael S. Tsirkin
            cpu_physical_memory_set_dirty_flags(addr1,
4194 733f0b02 Michael S. Tsirkin
                (0xff & ~CODE_DIRTY_FLAG));
4195 733f0b02 Michael S. Tsirkin
        }
4196 733f0b02 Michael S. Tsirkin
    }
4197 aab33094 bellard
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
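
/* Usage sketch (illustrative; "cfg_base", "status" and "counter" are
 * made-up names): a device model that publishes a 16-bit status word and
 * a 64-bit counter at fixed guest-physical addresses could do:
 *
 *     stw_le_phys(cfg_base + 0x00, status);   // always little-endian
 *     stq_be_phys(cfg_base + 0x08, counter);  // always big-endian
 *     stw_phys(cfg_base + 0x10, status);      // target-native order
 */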

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
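
/* Usage sketch (illustrative; "vaddr" is a hypothetical guest virtual
 * address): a gdbstub-style caller reading four bytes of guest memory:
 *
 *     uint8_t buf[4];
 *     if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
 *         return;  // no physical page mapped at vaddr
 *     }
 */
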
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
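
/* Flow summary: "n" ends up as the number of guest instructions executed
 * up to and including the offending I/O insn, so retranslating with
 * cflags = n | CF_LAST_IO forces the new TB to end on that insn and the
 * deterministic instruction count stays exact across the device access.
 */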

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
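
/* Typically reached via the "info jit" monitor command; the output is a
 * small text report along these lines (numbers are made up):
 *
 *     Translation buffer state:
 *     gen code size       1048576/33554432
 *     TB count            2048/262144
 *     ...
 */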

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !is_romd(pd)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
                  TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}
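
/* Worked example (illustrative; assumes TARGET_PAGE_BITS == 12 and
 * CPU_TLB_SIZE == 256): for addr == 0x00402345 the code-TLB slot is
 * (0x00402345 >> 12) & 255 == 0x02; if that slot does not hold the page,
 * the ldub_code() above faults it in before the addend is applied.
 */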

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
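
/* Usage sketch (illustrative; "vdev->is_be" is a hypothetical field): a
 * virtio device model could latch the byte order once at init time:
 *
 *     vdev->is_be = virtio_is_big_endian();
 */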

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
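
/* The four SHIFT values below instantiate softmmu_template.h for 1-, 2-,
 * 4- and 8-byte accesses; together with SOFTMMU_CODE_ACCESS and
 * MMUSUFFIX _cmmu this generates the code-fetch variants of the softmmu
 * helpers (the ones backing ldub_code() and friends) rather than the
 * data-access versions.
 */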

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif