Statistics
| Branch: | Revision:

root / exec.c @ 9002ec79

History | View | Annotate | Download (119.4 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
#include <stdlib.h>
27 54936004 bellard
#include <stdio.h>
28 54936004 bellard
#include <stdarg.h>
29 54936004 bellard
#include <string.h>
30 54936004 bellard
#include <errno.h>
31 54936004 bellard
#include <unistd.h>
32 54936004 bellard
#include <inttypes.h>
33 54936004 bellard
34 6180a181 bellard
#include "cpu.h"
35 6180a181 bellard
#include "exec-all.h"
36 ca10f867 aurel32
#include "qemu-common.h"
37 b67d9a52 bellard
#include "tcg.h"
38 b3c7724c pbrook
#include "hw/hw.h"
39 74576198 aliguori
#include "osdep.h"
40 7ba1e619 aliguori
#include "kvm.h"
41 29e922b6 Blue Swirl
#include "qemu-timer.h"
42 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
43 53a5960a pbrook
#include <qemu.h>
44 fd052bf6 Riku Voipio
#include <signal.h>
45 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
46 f01576f1 Juergen Lock
#include <sys/param.h>
47 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
48 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
49 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
50 f01576f1 Juergen Lock
#include <sys/time.h>
51 f01576f1 Juergen Lock
#include <sys/proc.h>
52 f01576f1 Juergen Lock
#include <machine/profile.h>
53 f01576f1 Juergen Lock
#define _KERNEL
54 f01576f1 Juergen Lock
#include <sys/user.h>
55 f01576f1 Juergen Lock
#undef _KERNEL
56 f01576f1 Juergen Lock
#undef sigqueue
57 f01576f1 Juergen Lock
#include <libutil.h>
58 f01576f1 Juergen Lock
#endif
59 f01576f1 Juergen Lock
#endif
60 53a5960a pbrook
#endif
61 54936004 bellard
62 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
63 66e85a21 bellard
//#define DEBUG_FLUSH
64 9fa3e853 bellard
//#define DEBUG_TLB
65 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
66 fd6ce8f6 bellard
67 fd6ce8f6 bellard
/* make various TB consistency checks */
68 5fafdf24 ths
//#define DEBUG_TB_CHECK
69 5fafdf24 ths
//#define DEBUG_TLB_CHECK
70 fd6ce8f6 bellard
71 1196be37 ths
//#define DEBUG_IOPORT
72 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
73 1196be37 ths
74 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
75 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
76 99773bd4 pbrook
#undef DEBUG_TB_CHECK
77 99773bd4 pbrook
#endif
78 99773bd4 pbrook
79 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
80 9fa3e853 bellard
81 bdaf78e0 blueswir1
static TranslationBlock *tbs;
82 26a5f13b bellard
int code_gen_max_blocks;
83 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84 bdaf78e0 blueswir1
static int nb_tbs;
85 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
86 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87 fd6ce8f6 bellard
88 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
89 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
90 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
91 d03d860b blueswir1
 section close to code segment. */
92 d03d860b blueswir1
#define code_gen_section                                \
93 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
94 d03d860b blueswir1
    __attribute__((aligned (32)))
95 f8e2af11 Stefan Weil
#elif defined(_WIN32)
96 f8e2af11 Stefan Weil
/* Maximum alignment for Win32 is 16. */
97 f8e2af11 Stefan Weil
#define code_gen_section                                \
98 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
99 d03d860b blueswir1
#else
100 d03d860b blueswir1
#define code_gen_section                                \
101 d03d860b blueswir1
    __attribute__((aligned (32)))
102 d03d860b blueswir1
#endif
103 d03d860b blueswir1
104 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
105 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
106 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
107 26a5f13b bellard
/* threshold to flush the translated code buffer */
108 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
109 fd6ce8f6 bellard
uint8_t *code_gen_ptr;
110 fd6ce8f6 bellard
111 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
112 9fa3e853 bellard
int phys_ram_fd;
113 1ccde1cb bellard
uint8_t *phys_ram_dirty;
114 74576198 aliguori
static int in_migration;
115 94a6b54f pbrook
116 94a6b54f pbrook
/* One contiguous chunk of guest RAM.  Blocks are kept in a singly-linked
   list headed by ram_blocks (below). */
typedef struct RAMBlock {
    /* Host virtual address backing this block. */
    uint8_t *host;
    /* Offset of the block in the ram_addr_t address space — presumably
       assigned sequentially (see last_ram_offset below); confirm at the
       allocation site. */
    ram_addr_t offset;
    /* Length of the block in bytes. */
    ram_addr_t length;
    /* Next block in the global list, or NULL. */
    struct RAMBlock *next;
} RAMBlock;
122 94a6b54f pbrook
123 94a6b54f pbrook
static RAMBlock *ram_blocks;
124 94a6b54f pbrook
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
125 ccbb4d44 Stuart Brady
   then we can no longer assume contiguous ram offsets, and external uses
126 94a6b54f pbrook
   of this variable will break.  */
127 c227f099 Anthony Liguori
ram_addr_t last_ram_offset;
128 e2eef170 pbrook
#endif
129 9fa3e853 bellard
130 6a00d601 bellard
CPUState *first_cpu;
131 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
132 6a00d601 bellard
   cpu_exec() */
133 5fafdf24 ths
CPUState *cpu_single_env;
134 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
135 bf20dc07 ths
   1 = Precise instruction counting.
136 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
137 2e70f6ef pbrook
int use_icount = 0;
138 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
139 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
140 2e70f6ef pbrook
int64_t qemu_icount;
141 6a00d601 bellard
142 54936004 bellard
typedef struct PageDesc {
143 92e873b9 bellard
    /* list of TBs intersecting this ram page */
144 fd6ce8f6 bellard
    TranslationBlock *first_tb;
145 9fa3e853 bellard
    /* in order to optimize self modifying code, we count the number
146 9fa3e853 bellard
       of lookups we do to a given page to use a bitmap */
147 9fa3e853 bellard
    unsigned int code_write_count;
148 9fa3e853 bellard
    uint8_t *code_bitmap;
149 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
150 9fa3e853 bellard
    unsigned long flags;
151 9fa3e853 bellard
#endif
152 54936004 bellard
} PageDesc;
153 54936004 bellard
154 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
155 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
156 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
157 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
158 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
159 41c1b1c9 Paul Brook
#else
160 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
161 41c1b1c9 Paul Brook
#endif
162 bedb69ea j_mayer
#else
163 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
164 bedb69ea j_mayer
#endif
165 54936004 bellard
166 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
167 5cd2c5b6 Richard Henderson
#define L2_BITS 10
168 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
169 54936004 bellard
170 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
171 5cd2c5b6 Richard Henderson
#define P_L1_BITS_REM \
172 5cd2c5b6 Richard Henderson
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
173 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
174 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
175 5cd2c5b6 Richard Henderson
176 5cd2c5b6 Richard Henderson
/* Size of the L1 page table.  Avoid silly small sizes.  */
177 5cd2c5b6 Richard Henderson
#if P_L1_BITS_REM < 4
178 5cd2c5b6 Richard Henderson
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
179 5cd2c5b6 Richard Henderson
#else
180 5cd2c5b6 Richard Henderson
#define P_L1_BITS  P_L1_BITS_REM
181 5cd2c5b6 Richard Henderson
#endif
182 5cd2c5b6 Richard Henderson
183 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
184 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
185 5cd2c5b6 Richard Henderson
#else
186 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
187 5cd2c5b6 Richard Henderson
#endif
188 5cd2c5b6 Richard Henderson
189 5cd2c5b6 Richard Henderson
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
190 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
191 5cd2c5b6 Richard Henderson
192 5cd2c5b6 Richard Henderson
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
193 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
194 5cd2c5b6 Richard Henderson
195 83fb7adf bellard
unsigned long qemu_real_host_page_size;
196 83fb7adf bellard
unsigned long qemu_host_page_bits;
197 83fb7adf bellard
unsigned long qemu_host_page_size;
198 83fb7adf bellard
unsigned long qemu_host_page_mask;
199 54936004 bellard
200 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
201 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
202 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
203 54936004 bellard
204 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
205 41c1b1c9 Paul Brook
typedef struct PhysPageDesc {
206 41c1b1c9 Paul Brook
    /* offset in host memory of the page + io_index in the low bits */
207 41c1b1c9 Paul Brook
    ram_addr_t phys_offset;
208 41c1b1c9 Paul Brook
    ram_addr_t region_offset;
209 41c1b1c9 Paul Brook
} PhysPageDesc;
210 41c1b1c9 Paul Brook
211 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
212 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
213 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
214 6d9a1304 Paul Brook
215 e2eef170 pbrook
static void io_mem_init(void);
216 e2eef170 pbrook
217 33417e70 bellard
/* io memory support */
218 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
219 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
220 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
221 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
222 6658ffb8 pbrook
static int io_mem_watch;
223 6658ffb8 pbrook
#endif
224 33417e70 bellard
225 34865134 bellard
/* log support */
226 1e8b27ca Juha Riihimรคki
#ifdef WIN32
227 1e8b27ca Juha Riihimรคki
static const char *logfilename = "qemu.log";
228 1e8b27ca Juha Riihimรคki
#else
229 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
230 1e8b27ca Juha Riihimรคki
#endif
231 34865134 bellard
FILE *logfile;
232 34865134 bellard
int loglevel;
233 e735b91c pbrook
static int log_append = 0;
234 34865134 bellard
235 e3db7226 bellard
/* statistics */
236 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
237 e3db7226 bellard
static int tlb_flush_count;
238 b3755a91 Paul Brook
#endif
239 e3db7226 bellard
static int tb_flush_count;
240 e3db7226 bellard
static int tb_phys_invalidate_count;
241 e3db7226 bellard
242 7cb69cae bellard
#ifdef _WIN32
/* Make the region [addr, addr + size) readable, writable and executable. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;

    /* Failure is deliberately ignored, matching the POSIX variant below. */
    VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make the host pages covering [addr, addr + size) readable, writable and
   executable.  The range is widened to whole pages because mprotect()
   works at page granularity. */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long first = (unsigned long)addr & ~(page_size - 1);
    unsigned long last = ((unsigned long)addr + size + page_size - 1)
                         & ~(page_size - 1);

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
267 7cb69cae bellard
268 b346ff46 bellard
/* Initialize the qemu_*_page_size/bits/mask globals from the host page
   size, and (BSD user mode only) mark the address ranges already mapped
   by the host as PAGE_RESERVED so guest mappings avoid them. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been preset elsewhere; only default it
       here, then clamp it to at least the guest page size. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* Derive log2 and alignment mask from the (power-of-two) page size. */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 700104: enumerate host mappings via kinfo_getvmmap(). */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* Mapping extends past the guest address space:
                           reserve up to the top of it (only meaningful if
                           the guest space fits in the L1 map). */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Older BSD: parse the Linux-compat maps file instead. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
356 54936004 bellard
357 41c1b1c9 Paul Brook
/* Return the PageDesc for guest page number 'index', walking the
   multi-level l1_map.  When 'alloc' is nonzero, missing intermediate
   tables and the leaf PageDesc array are created on demand; otherwise
   NULL is returned for entries that were never populated. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  Each iteration descends one table level, consuming
       L2_BITS of the index per level. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of L2_SIZE PageDesc entries. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
406 54936004 bellard
407 41c1b1c9 Paul Brook
/* Non-allocating lookup: return the PageDesc for 'index', or NULL when
   the page has no descriptor yet. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, /* alloc */ 0);
}
411 fd6ce8f6 bellard
412 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
413 c227f099 Anthony Liguori
/* Return the PhysPageDesc for physical page number 'index', walking the
   multi-level l1_phys_map.  When 'alloc' is nonzero, missing
   intermediate tables and the leaf array are created on demand
   (otherwise NULL is returned).  Fresh leaf entries are initialized to
   IO_MEM_UNASSIGNED with an identity region_offset.
   Fix: the original declared a second 'int i' inside the leaf-allocation
   branch, shadowing the outer loop counter (-Wshadow); the outer counter
   is finished by then, so it is reused instead. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }

        /* Plain qemu_malloc is fine: every element is fully initialized
           by the loop below. */
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
452 92e873b9 bellard
453 c227f099 Anthony Liguori
/* Non-allocating lookup: return the PhysPageDesc for 'index', or NULL
   when the physical page was never registered. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, /* alloc */ 0);
}
457 92e873b9 bellard
458 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
459 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
460 3a7d929e bellard
                                    target_ulong vaddr);
461 c8a706fe pbrook
#define mmap_lock() do { } while(0)
462 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
463 9fa3e853 bellard
#endif
464 fd6ce8f6 bellard
465 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
466 4369415f bellard
467 4369415f bellard
#if defined(CONFIG_USER_ONLY)
468 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
469 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
470 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
471 4369415f bellard
#endif
472 4369415f bellard
473 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
474 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
475 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
476 4369415f bellard
#endif
477 4369415f bellard
478 8fcd3692 blueswir1
/* Allocate (or adopt the static) buffer that receives generated host
   code, make it executable, and size the TB bookkeeping arrays.  A
   'tb_size' of 0 selects a platform default.  Placement constraints per
   host arch exist so that TBs can reach the prologue and each other with
   direct branches. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User mode: use the statically-allocated buffer; tb_size ignored. */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__) 
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Keep the buffer in the low 2GB so 32-bit relative branches work. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Other hosts: plain heap allocation, then flip on exec permission. */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Leave headroom for one maximal translation block at the end of the
       buffer; code generation flushes when the max size is reached. */
    code_gen_buffer_max_size = code_gen_buffer_size - 
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
564 26a5f13b bellard
565 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    /* Allocate the translated-code buffer and the tbs[] array. */
    code_gen_alloc(tb_size);
    /* Generation starts at the base of the freshly allocated buffer. */
    code_gen_ptr = code_gen_buffer;
    /* Set up host/guest page-size globals (and BSD reserved ranges). */
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
583 26a5f13b bellard
584 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
585 9656f324 pbrook
586 e59fb374 Juan Quintela
static int cpu_common_post_load(void *opaque, int version_id)
587 e7f4eff7 Juan Quintela
{
588 e7f4eff7 Juan Quintela
    CPUState *env = opaque;
589 9656f324 pbrook
590 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
591 3098dba0 aurel32
       version_id is increased. */
592 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
593 9656f324 pbrook
    tlb_flush(env, 1);
594 9656f324 pbrook
595 9656f324 pbrook
    return 0;
596 9656f324 pbrook
}
597 e7f4eff7 Juan Quintela
598 e7f4eff7 Juan Quintela
/* Migration description of the target-independent part of CPUState.
   Only 'halted' and 'interrupt_request' are transferred; the TLB is
   flushed on load by cpu_common_post_load(). */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
610 9656f324 pbrook
#endif
611 9656f324 pbrook
612 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
613 950f1472 Glauber Costa
{
614 950f1472 Glauber Costa
    CPUState *env = first_cpu;
615 950f1472 Glauber Costa
616 950f1472 Glauber Costa
    while (env) {
617 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
618 950f1472 Glauber Costa
            break;
619 950f1472 Glauber Costa
        env = env->next_cpu;
620 950f1472 Glauber Costa
    }
621 950f1472 Glauber Costa
622 950f1472 Glauber Costa
    return env;
623 950f1472 Glauber Costa
}
624 950f1472 Glauber Costa
625 6a00d601 bellard
/* Register 'env' as a new virtual CPU: append it to the tail of the
   global CPU list, assign it the next free cpu_index and hook up the
   savevm/vmstate handlers (softmmu builds only). */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* user-mode emulation may create CPUs from several threads;
       serialize manipulation of the global list */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    /* walk to the end of the singly linked CPU list, counting
       entries to derive the new CPU's index */
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    /* link the new CPU at the tail */
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
654 fd6ce8f6 bellard
655 9fa3e853 bellard
/* Discard the self-modifying-code bitmap of a page, if any, and
   reset its code write counter. */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    void *bitmap = p->code_bitmap;

    p->code_bitmap = NULL;
    p->code_write_count = 0;
    if (bitmap != NULL) {
        qemu_free(bitmap);
    }
}
663 9fa3e853 bellard
664 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
665 5cd2c5b6 Richard Henderson
666 5cd2c5b6 Richard Henderson
static void page_flush_tb_1 (int level, void **lp)
667 fd6ce8f6 bellard
{
668 5cd2c5b6 Richard Henderson
    int i;
669 fd6ce8f6 bellard
670 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
671 5cd2c5b6 Richard Henderson
        return;
672 5cd2c5b6 Richard Henderson
    }
673 5cd2c5b6 Richard Henderson
    if (level == 0) {
674 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
675 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
676 5cd2c5b6 Richard Henderson
            pd[i].first_tb = NULL;
677 5cd2c5b6 Richard Henderson
            invalidate_page_bitmap(pd + i);
678 fd6ce8f6 bellard
        }
679 5cd2c5b6 Richard Henderson
    } else {
680 5cd2c5b6 Richard Henderson
        void **pp = *lp;
681 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
682 5cd2c5b6 Richard Henderson
            page_flush_tb_1 (level - 1, pp + i);
683 5cd2c5b6 Richard Henderson
        }
684 5cd2c5b6 Richard Henderson
    }
685 5cd2c5b6 Richard Henderson
}
686 5cd2c5b6 Richard Henderson
687 5cd2c5b6 Richard Henderson
static void page_flush_tb(void)
688 5cd2c5b6 Richard Henderson
{
689 5cd2c5b6 Richard Henderson
    int i;
690 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
691 5cd2c5b6 Richard Henderson
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
692 fd6ce8f6 bellard
    }
693 fd6ce8f6 bellard
}
694 fd6ce8f6 bellard
695 fd6ce8f6 bellard
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* 'env1' is only used for error reporting here */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* every cpu's fast TB-lookup cache now holds stale pointers */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* reuse the whole code buffer from the start */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
723 fd6ce8f6 bellard
724 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
725 fd6ce8f6 bellard
726 bc98a7ef j_mayer
/* Debug check: verify that no TB overlapping the page containing
   'address' is still present in the physical hash table; print an
   error for each offender. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    /* scan every bucket of the phys hash for an intersecting TB */
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
742 fd6ce8f6 bellard
743 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* check both the first and last page the TB touches;
               neither may still be writable */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
760 fd6ce8f6 bellard
761 fd6ce8f6 bellard
#endif
762 fd6ce8f6 bellard
763 fd6ce8f6 bellard
/* invalidate one TB */
764 fd6ce8f6 bellard
/* Unlink 'tb' from a singly linked TB list.  The list's next pointer
   lives at byte offset 'next_offset' inside each TranslationBlock;
   'tb' must be present in the list. */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    /* advance to the link slot that points at 'tb' */
    while (*ptb != tb) {
        ptb = (TranslationBlock **)((char *)*ptb + next_offset);
    }
    /* splice 'tb' out by copying its successor into that slot */
    *ptb = *(TranslationBlock **)((char *)tb + next_offset);
}
777 fd6ce8f6 bellard
778 9fa3e853 bellard
/* Unlink 'tb' from the per-page TB list headed at *ptb.  Links in
   this list are tagged pointers: the low 2 bits of each link select
   which page_next[] slot of the pointed-to TB continues the chain. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;                           /* extract tag */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);   /* strip tag */
        if (tb1 == tb) {
            /* splice 'tb' out of the chain */
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
794 9fa3e853 bellard
795 d4e8164f bellard
/* Remove jump slot 'n' of 'tb' from the circular list of incoming
   jumps it belongs to.  Entries are tagged pointers: the low 2 bits
   select the jmp_next[] slot of the pointed-to TB that continues the
   chain, and tag value 2 marks the list-head TB (reached through its
   jmp_first field). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: follow the head TB's jmp_first link */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
822 d4e8164f bellard
823 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* re-point the patched jump back into this TB's own generated
       code at tb_next_offset[n], undoing any direct block chaining */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
829 d4e8164f bellard
830 41c1b1c9 Paul Brook
/* Remove 'tb' from every tracking structure so it can no longer be
   found or executed: the physical hash table, the per-page TB lists
   (pages equal to 'page_addr' are skipped -- the caller is walking
   that page's list itself; pass -1 to unlink from all pages), each
   cpu's tb_jmp_cache, and the direct jump chains. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each cpu's fast lookup cache (tb_jmp_cache) */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)        /* tag 2 marks the end of the circular list */
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
885 9fa3e853 bellard
886 9fa3e853 bellard
/* Set 'len' consecutive bits of the bitmap 'tab' starting at bit
   index 'start'.  Bit i lives in byte i>>3 at position i&7. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);

    if ((start >> 3) == (end >> 3)) {
        /* the whole range is confined to a single byte */
        if (start < end) {
            *p |= (0xff << (start & 7)) & ~(0xff << (end & 7));
        }
    } else {
        int body_end = end & ~7;

        /* head: from 'start' to the end of its byte */
        *p++ |= 0xff << (start & 7);
        /* body: bytes covered entirely by the range */
        for (start = (start + 8) & ~7; start < body_end; start += 8) {
            *p++ = 0xff;
        }
        /* tail: remaining bits in the last byte, if any */
        if (start < end) {
            *p |= ~(0xff << (end & 7));
        }
    }
}
912 9fa3e853 bellard
913 9fa3e853 bellard
/* Build the self-modifying-code bitmap of a page: one bit per byte
   of the page, set wherever translated code was taken from.  Walks
   the page's tagged-pointer TB list (low 2 bits of each link select
   the page slot of the TB). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per byte of the page, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: it covers [0, end) here */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
940 9fa3e853 bellard
941 2e70f6ef pbrook
/* Translate a new basic block starting at (pc, cs_base, flags) and
   link it into the page tables and hash structures.  If the TB pool
   or code buffer is exhausted, everything is flushed and translation
   retried.  Returns the freshly generated TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    /* generated host code goes at the current buffer position */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
978 3b46e624 ths
979 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after SMC_BITMAP_USE_THRESHOLD real write faults on this page,
       build a bitmap so later writes can skip the slow path */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;      /* tag: which page slot of the TB this is */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1089 fd6ce8f6 bellard
1090 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for a guest write of 'len' bytes at physical 'start':
   consult the page's code bitmap (when present) and only fall back
   to the full range invalidation if translated code is hit. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* shift the bitmap byte so bit 0 corresponds to 'start' */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1116 9fa3e853 bellard
1117 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1118 41c1b1c9 Paul Brook
/* Invalidate every TB stored in the page containing physical address
   'addr' (user-mode / non-softmmu path).  'pc'/'puc' describe the
   faulting write so precise-SMC targets can restart cleanly. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;      /* tag: page slot of the TB in this list */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
1176 9fa3e853 bellard
#endif
1177 fd6ce8f6 bellard
1178 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
/* 'n' is the TB's page slot (0 or 1) for 'page_addr'; the low bits
   of the per-page list links store this tag. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push the tagged TB pointer at the head of the page's TB list */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages; accumulate the
           flags of all of them and strip PAGE_WRITE from each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1231 fd6ce8f6 bellard
1232 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1233 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1234 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1235 fd6ce8f6 bellard
{
1236 fd6ce8f6 bellard
    TranslationBlock *tb;
1237 fd6ce8f6 bellard
1238 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1239 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1240 d4e8164f bellard
        return NULL;
1241 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1242 fd6ce8f6 bellard
    tb->pc = pc;
1243 b448f2f3 bellard
    tb->cflags = 0;
1244 d4e8164f bellard
    return tb;
1245 d4e8164f bellard
}
1246 d4e8164f bellard
1247 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs <= 0 || tb != &tbs[nb_tbs - 1]) {
        return;
    }
    nb_tbs--;
    code_gen_ptr = tb->tc_ptr;
}
1257 2e70f6ef pbrook
1258 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* the jmp_first list initially points back at the TB itself;
       tag value 2 in the low bits marks the list head */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff means the jump slot is unused) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1297 fd6ce8f6 bellard
1298 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1299 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1300 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1301 fd6ce8f6 bellard
{
1302 9fa3e853 bellard
    int m_min, m_max, m;
1303 9fa3e853 bellard
    unsigned long v;
1304 9fa3e853 bellard
    TranslationBlock *tb;
1305 a513fe19 bellard
1306 a513fe19 bellard
    if (nb_tbs <= 0)
1307 a513fe19 bellard
        return NULL;
1308 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1309 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1310 a513fe19 bellard
        return NULL;
1311 a513fe19 bellard
    /* binary search (cf Knuth) */
1312 a513fe19 bellard
    m_min = 0;
1313 a513fe19 bellard
    m_max = nb_tbs - 1;
1314 a513fe19 bellard
    while (m_min <= m_max) {
1315 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1316 a513fe19 bellard
        tb = &tbs[m];
1317 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1318 a513fe19 bellard
        if (v == tc_ptr)
1319 a513fe19 bellard
            return tb;
1320 a513fe19 bellard
        else if (tc_ptr < v) {
1321 a513fe19 bellard
            m_max = m - 1;
1322 a513fe19 bellard
        } else {
1323 a513fe19 bellard
            m_min = m + 1;
1324 a513fe19 bellard
        }
1325 5fafdf24 ths
    }
1326 a513fe19 bellard
    return &tbs[m_max];
1327 a513fe19 bellard
}
1328 7501267e bellard
1329 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1330 ea041c0e bellard
1331 ea041c0e bellard
/* Unchain jump slot n of 'tb' from the TB it jumps to, then recursively
   unchain that target TB.  Pointers in jmp_first/jmp_next carry a 2-bit
   tag in their low bits: 0 or 1 selects which jump slot of the pointed-to
   TB the link refers to, and 2 marks the list head (the TB itself). */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1369 ea041c0e bellard
1370 ea041c0e bellard
/* Reset both outgoing jump chains of a TB. */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    int n;

    /* a TB has exactly two outgoing jump slots */
    for (n = 0; n < 2; n++) {
        tb_reset_jump_recursive2(tb, n);
    }
}
1375 ea041c0e bellard
1376 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1377 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1378 94df27fd Paul Brook
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1379 94df27fd Paul Brook
{
1380 94df27fd Paul Brook
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1381 94df27fd Paul Brook
}
1382 94df27fd Paul Brook
#else
1383 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1384 d720b93d bellard
{
1385 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1386 9b3c35e0 j_mayer
    target_ulong pd;
1387 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1388 c2f07f81 pbrook
    PhysPageDesc *p;
1389 d720b93d bellard
1390 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1391 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1392 c2f07f81 pbrook
    if (!p) {
1393 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1394 c2f07f81 pbrook
    } else {
1395 c2f07f81 pbrook
        pd = p->phys_offset;
1396 c2f07f81 pbrook
    }
1397 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1398 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1399 d720b93d bellard
}
1400 c27004ec bellard
#endif
1401 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1402 d720b93d bellard
1403 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: watchpoints are not supported, so removal is a
   no-op and insertion reports "not implemented". */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
1415 6658ffb8 pbrook
/* Add a watchpoint.  */
1416 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1417 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1418 6658ffb8 pbrook
{
1419 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1420 c0ce998e aliguori
    CPUWatchpoint *wp;
1421 6658ffb8 pbrook
1422 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1423 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1424 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1425 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1426 b4051334 aliguori
        return -EINVAL;
1427 b4051334 aliguori
    }
1428 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1429 a1d1bb31 aliguori
1430 a1d1bb31 aliguori
    wp->vaddr = addr;
1431 b4051334 aliguori
    wp->len_mask = len_mask;
1432 a1d1bb31 aliguori
    wp->flags = flags;
1433 a1d1bb31 aliguori
1434 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1435 c0ce998e aliguori
    if (flags & BP_GDB)
1436 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1437 c0ce998e aliguori
    else
1438 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1439 6658ffb8 pbrook
1440 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1441 a1d1bb31 aliguori
1442 a1d1bb31 aliguori
    if (watchpoint)
1443 a1d1bb31 aliguori
        *watchpoint = wp;
1444 a1d1bb31 aliguori
    return 0;
1445 6658ffb8 pbrook
}
1446 6658ffb8 pbrook
1447 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1448 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1449 a1d1bb31 aliguori
                          int flags)
1450 6658ffb8 pbrook
{
1451 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1452 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1453 6658ffb8 pbrook
1454 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1455 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1456 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1457 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1458 6658ffb8 pbrook
            return 0;
1459 6658ffb8 pbrook
        }
1460 6658ffb8 pbrook
    }
1461 a1d1bb31 aliguori
    return -ENOENT;
1462 6658ffb8 pbrook
}
1463 6658ffb8 pbrook
1464 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1465 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1466 a1d1bb31 aliguori
{
1467 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1468 7d03f82f edgar_igl
1469 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1470 a1d1bb31 aliguori
1471 a1d1bb31 aliguori
    qemu_free(watchpoint);
1472 a1d1bb31 aliguori
}
1473 a1d1bb31 aliguori
1474 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1475 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1476 a1d1bb31 aliguori
{
1477 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1478 a1d1bb31 aliguori
1479 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1480 a1d1bb31 aliguori
        if (wp->flags & mask)
1481 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1482 c0ce998e aliguori
    }
1483 7d03f82f edgar_igl
}
1484 c527ee8f Paul Brook
#endif
1485 7d03f82f edgar_igl
1486 a1d1bb31 aliguori
/* Add a breakpoint.  */
1487 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1488 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1489 4c3a88a2 bellard
{
1490 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1491 c0ce998e aliguori
    CPUBreakpoint *bp;
1492 3b46e624 ths
1493 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1494 4c3a88a2 bellard
1495 a1d1bb31 aliguori
    bp->pc = pc;
1496 a1d1bb31 aliguori
    bp->flags = flags;
1497 a1d1bb31 aliguori
1498 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1499 c0ce998e aliguori
    if (flags & BP_GDB)
1500 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1501 c0ce998e aliguori
    else
1502 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1503 3b46e624 ths
1504 d720b93d bellard
    breakpoint_invalidate(env, pc);
1505 a1d1bb31 aliguori
1506 a1d1bb31 aliguori
    if (breakpoint)
1507 a1d1bb31 aliguori
        *breakpoint = bp;
1508 4c3a88a2 bellard
    return 0;
1509 4c3a88a2 bellard
#else
1510 a1d1bb31 aliguori
    return -ENOSYS;
1511 4c3a88a2 bellard
#endif
1512 4c3a88a2 bellard
}
1513 4c3a88a2 bellard
1514 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1515 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1516 a1d1bb31 aliguori
{
1517 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1518 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1519 a1d1bb31 aliguori
1520 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1521 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1522 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1523 a1d1bb31 aliguori
            return 0;
1524 a1d1bb31 aliguori
        }
1525 7d03f82f edgar_igl
    }
1526 a1d1bb31 aliguori
    return -ENOENT;
1527 a1d1bb31 aliguori
#else
1528 a1d1bb31 aliguori
    return -ENOSYS;
1529 7d03f82f edgar_igl
#endif
1530 7d03f82f edgar_igl
}
1531 7d03f82f edgar_igl
1532 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1533 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1534 4c3a88a2 bellard
{
1535 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1536 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1537 d720b93d bellard
1538 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1539 a1d1bb31 aliguori
1540 a1d1bb31 aliguori
    qemu_free(breakpoint);
1541 a1d1bb31 aliguori
#endif
1542 a1d1bb31 aliguori
}
1543 a1d1bb31 aliguori
1544 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1545 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1546 a1d1bb31 aliguori
{
1547 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1548 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1549 a1d1bb31 aliguori
1550 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1551 a1d1bb31 aliguori
        if (bp->flags & mask)
1552 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1553 c0ce998e aliguori
    }
1554 4c3a88a2 bellard
#endif
1555 4c3a88a2 bellard
}
1556 4c3a88a2 bellard
1557 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1558 c33a346e bellard
   CPU loop after each instruction */
1559 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1560 c33a346e bellard
{
1561 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1562 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1563 c33a346e bellard
        env->singlestep_enabled = enabled;
1564 e22a25c9 aliguori
        if (kvm_enabled())
1565 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1566 e22a25c9 aliguori
        else {
1567 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1568 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1569 e22a25c9 aliguori
            tb_flush(env);
1570 e22a25c9 aliguori
        }
1571 c33a346e bellard
    }
1572 c33a346e bellard
#endif
1573 c33a346e bellard
}
1574 c33a346e bellard
1575 34865134 bellard
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* lazily open the log file the first time logging is enabled */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* subsequent reopens append instead of truncating */
        log_append = 1;
    }
    /* disabling logging closes the file */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1602 34865134 bellard
1603 34865134 bellard
void cpu_set_log_filename(const char *filename)
1604 34865134 bellard
{
1605 34865134 bellard
    logfilename = strdup(filename);
1606 e735b91c pbrook
    if (logfile) {
1607 e735b91c pbrook
        fclose(logfile);
1608 e735b91c pbrook
        logfile = NULL;
1609 e735b91c pbrook
    }
1610 e735b91c pbrook
    cpu_set_log(loglevel);
1611 34865134 bellard
}
1612 c33a346e bellard
1613 3098dba0 aurel32
/* Break the TB chains of the TB the CPU is currently executing, so the
   CPU returns to the main loop instead of following chained jumps. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    /* serializes concurrent unlink attempts (e.g. from signal context) */
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1632 3098dba0 aurel32
1633 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* with icount, make the instruction-count decrement appear
           expired so the current TB ends at the next instruction */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* a *newly raised* interrupt outside an I/O instruction would
           make icount execution non-deterministic */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        /* without icount, break chained TBs so the request is noticed */
        cpu_unlink_tb(env);
    }
}
1664 ea041c0e bellard
1665 b54ad049 bellard
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1669 b54ad049 bellard
1670 3098dba0 aurel32
/* Request that the CPU leaves its execution loop as soon as possible. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    /* break chained TBs so the exit request is noticed promptly */
    cpu_unlink_tb(env);
}
1675 3098dba0 aurel32
1676 c7cd6a37 blueswir1
/* Table of log categories accepted by cpu_str_to_log_mask(): mask bit,
   option name, and help text.  Terminated by a zero-mask entry. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1707 f193c797 bellard
1708 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
/* List of clients interested in physical-memory mapping changes. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

/* Notify every registered client that the region starting at start_addr
   of the given size is now mapped at phys_offset. */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}
1721 f6f3fbca Michael S. Tsirkin
1722 f6f3fbca Michael S. Tsirkin
/* Ask every registered client to sync its dirty bitmap for the range;
   stops and returns the error of the first client that fails. */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *cur;
    int ret;

    QLIST_FOREACH(cur, &memory_client_list, list) {
        ret = cur->sync_dirty_bitmap(cur, start, end);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}
1733 f6f3fbca Michael S. Tsirkin
1734 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1735 f6f3fbca Michael S. Tsirkin
{
1736 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1737 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1738 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1739 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1740 f6f3fbca Michael S. Tsirkin
            return r;
1741 f6f3fbca Michael S. Tsirkin
    }
1742 f6f3fbca Michael S. Tsirkin
    return 0;
1743 f6f3fbca Michael S. Tsirkin
}
1744 f6f3fbca Michael S. Tsirkin
1745 5cd2c5b6 Richard Henderson
/* Recursively walk one subtree of the physical page table, invoking the
   client's set_memory callback for every mapped page.  'level' is the
   number of remaining intermediate levels: at level 0, *lp is a leaf
   array of PhysPageDesc; otherwise it is an array of child pointers. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                /* NOTE(review): region_offset is passed as the start
                   address here; confirm clients expect the region offset
                   rather than the guest physical address. */
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1768 f6f3fbca Michael S. Tsirkin
1769 f6f3fbca Michael S. Tsirkin
/* Walk the entire physical page table and report every mapped page to
   the client via phys_page_for_each_1().  The second argument is the
   number of intermediate levels below L1. */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        /* Bug fix: the original passed 'l1_phys_map + 1' on every
           iteration, so only L1 slot 1 was ever visited; each slot i
           must be walked. */
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}
1777 f6f3fbca Michael S. Tsirkin
1778 f6f3fbca Michael S. Tsirkin
/* Register a new physical-memory client and immediately replay all
   existing mappings to it. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    /* bring the new client up to date with the current page tables */
    phys_page_for_each(client);
}
1783 f6f3fbca Michael S. Tsirkin
1784 f6f3fbca Michael S. Tsirkin
/* Drop a client from the physical-memory notification list. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1788 f6f3fbca Michael S. Tsirkin
#endif
1789 f6f3fbca Michael S. Tsirkin
1790 f193c797 bellard
/* return non-zero iff the first n bytes of s1 equal the whole of s2 */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return !memcmp(s1, s2, n);
}
1796 3b46e624 ths
1797 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
1798 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1799 f193c797 bellard
{
1800 c7cd6a37 blueswir1
    const CPULogItem *item;
1801 f193c797 bellard
    int mask;
1802 f193c797 bellard
    const char *p, *p1;
1803 f193c797 bellard
1804 f193c797 bellard
    p = str;
1805 f193c797 bellard
    mask = 0;
1806 f193c797 bellard
    for(;;) {
1807 f193c797 bellard
        p1 = strchr(p, ',');
1808 f193c797 bellard
        if (!p1)
1809 f193c797 bellard
            p1 = p + strlen(p);
1810 8e3a9fd2 bellard
        if(cmp1(p,p1-p,"all")) {
1811 8e3a9fd2 bellard
                for(item = cpu_log_items; item->mask != 0; item++) {
1812 8e3a9fd2 bellard
                        mask |= item->mask;
1813 8e3a9fd2 bellard
                }
1814 8e3a9fd2 bellard
        } else {
1815 f193c797 bellard
        for(item = cpu_log_items; item->mask != 0; item++) {
1816 f193c797 bellard
            if (cmp1(p, p1 - p, item->name))
1817 f193c797 bellard
                goto found;
1818 f193c797 bellard
        }
1819 f193c797 bellard
        return 0;
1820 8e3a9fd2 bellard
        }
1821 f193c797 bellard
    found:
1822 f193c797 bellard
        mask |= item->mask;
1823 f193c797 bellard
        if (*p1 != ',')
1824 f193c797 bellard
            break;
1825 f193c797 bellard
        p = p1 + 1;
1826 f193c797 bellard
    }
1827 f193c797 bellard
    return mask;
1828 f193c797 bellard
}
1829 ea041c0e bellard
1830 7501267e bellard
/* Print a fatal error message (printf-style) plus the CPU state to
   stderr and to the log file, then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* ap is consumed by the stderr report; ap2 feeds the log file */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT disposition so abort() really
           terminates even if the guest installed a handler */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1869 7501267e bellard
1870 c5be9f08 ths
/* Create a copy of the given CPU state.  The new CPU keeps its own
   position in the global CPU list and its own index; break- and
   watchpoints are cloned from the source CPU.
   Returns the newly created CPUState. */
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    /* The memcpy above copied env's list heads verbatim into new_env, so
       reset the *copies* before inserting clones.  (The previous code
       initialized env's own lists and then iterated the now-empty lists,
       which both destroyed the source CPU's break/watchpoints and cloned
       nothing.) */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1903 c5be9f08 ths
1904 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1905 0124311e bellard
1906 5c751e99 edgar_igl
/* Drop cached TB-jump entries for any translation block that might
   overlap the flushed page 'addr'. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    /* A tb may start on the previous page and spill into the flushed
       one, so clear the hash buckets for both pages. */
    unsigned int prev_hash = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    unsigned int page_hash = tb_jmp_cache_hash_page(addr);

    memset(&env->tb_jmp_cache[prev_hash], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    memset(&env->tb_jmp_cache[page_hash], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1920 5c751e99 edgar_igl
1921 08738984 Igor Kovalenko
/* Template for an invalidated TLB entry: all-ones addresses never match
   a page-masked virtual address, so lookups against it always miss. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1927 08738984 Igor Kovalenko
1928 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1929 ee8b7021 bellard
   implemented yet) */
1930 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1931 33417e70 bellard
{
1932 33417e70 bellard
    int i;
1933 0124311e bellard
1934 9fa3e853 bellard
#if defined(DEBUG_TLB)
1935 9fa3e853 bellard
    printf("tlb_flush:\n");
1936 9fa3e853 bellard
#endif
1937 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1938 0124311e bellard
       links while we are modifying them */
1939 0124311e bellard
    env->current_tb = NULL;
1940 0124311e bellard
1941 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1942 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1943 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1944 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1945 cfde4bd9 Isaku Yamahata
        }
1946 33417e70 bellard
    }
1947 9fa3e853 bellard
1948 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1949 9fa3e853 bellard
1950 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1951 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1952 e3db7226 bellard
    tlb_flush_count++;
1953 33417e70 bellard
}
1954 33417e70 bellard
1955 274da6b2 bellard
/* Invalidate one TLB entry if any of its three address fields maps the
   page 'addr'. */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    target_ulong cmp_mask = TARGET_PAGE_MASK | TLB_INVALID_MASK;

    if (addr == (tlb_entry->addr_read & cmp_mask) ||
        addr == (tlb_entry->addr_write & cmp_mask) ||
        addr == (tlb_entry->addr_code & cmp_mask)) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1966 61382a50 bellard
1967 2e12669a bellard
/* Flush all TLB entries mapping the virtual page containing 'addr'. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int mmu_idx;
    int slot;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        /* The page lies inside a tracked large-page region; a full
           flush is the only safe option. */
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    slot = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][slot], addr);
    }

    tlb_flush_jmp_cache(env, addr);
}
1996 9fa3e853 bellard
1997 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG for the page forces writes through the
       slow path, where self-modifying code can be caught. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2005 9fa3e853 bellard
2006 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* Re-set the code-dirty bit; 'env' and 'vaddr' are unused here. */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2013 1ccde1cb bellard
2014 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2015 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
2016 1ccde1cb bellard
{
2017 1ccde1cb bellard
    unsigned long addr;
2018 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2019 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2020 1ccde1cb bellard
        if ((addr - start) < length) {
2021 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2022 1ccde1cb bellard
        }
2023 1ccde1cb bellard
    }
2024 1ccde1cb bellard
}
2025 1ccde1cb bellard
2026 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
/* Clear 'dirty_flags' for the physical range [start, end) and patch
   every CPU's TLB so that the next write to the range goes through the
   slow path and sets the dirty bits again. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    /* Sweep every TLB entry of every CPU in every MMU mode. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2061 1ccde1cb bellard
2062 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2063 74576198 aliguori
{
2064 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2065 74576198 aliguori
    in_migration = enable;
2066 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2067 f6f3fbca Michael S. Tsirkin
    return ret;
2068 74576198 aliguori
}
2069 74576198 aliguori
2070 74576198 aliguori
/* Return nonzero if dirty-memory tracking is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2074 74576198 aliguori
2075 c227f099 Anthony Liguori
/* Ask registered memory clients to synchronise their dirty bitmap for
   the physical range [start_addr, end_addr]; propagate their status. */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    return cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
}
2083 2bec46dc aliguori
2084 3a7d929e bellard
/* Refresh one TLB entry's NOTDIRTY marker from the page's current
   dirty state. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    void *host;
    ram_addr_t ram_addr;

    /* Only RAM-backed writable mappings carry dirty state. */
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    host = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
                                   + tlb_entry->addend);
    ram_addr = qemu_ram_addr_from_host(host);
    if (!cpu_physical_memory_is_dirty(ram_addr)) {
        /* Clean page: route the next write through the slow path. */
        tlb_entry->addr_write |= TLB_NOTDIRTY;
    }
}
2098 3a7d929e bellard
2099 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2100 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2101 3a7d929e bellard
{
2102 3a7d929e bellard
    int i;
2103 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2104 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2105 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2106 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2107 cfde4bd9 Isaku Yamahata
    }
2108 3a7d929e bellard
}
2109 3a7d929e bellard
2110 0f459d16 pbrook
/* Clear the NOTDIRTY marker on one TLB entry if it maps 'vaddr', so
   writes take the fast path again. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
2115 1ccde1cb bellard
2116 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2117 0f459d16 pbrook
   so that it is no longer dirty */
2118 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2119 1ccde1cb bellard
{
2120 1ccde1cb bellard
    int i;
2121 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2122 1ccde1cb bellard
2123 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2124 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2125 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2126 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2127 9fa3e853 bellard
}
2128 9fa3e853 bellard
2129 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    /* NOTE(review): 'size' is presumably a power of two, otherwise
       ~(size - 1) is not a valid alignment mask — confirm at callers. */
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No large page tracked yet: remember just this one. */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    /* Widen the mask until the tracked region and the new page fall
       into the same aligned block. */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2151 d4c430a8 Paul Brook
2152 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;           /* physical offset + IO/flag bits */
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;       /* host address bias for fast RAM access */
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Remember large pages so tlb_flush_page can force a full flush. */
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }
    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }
    /* Fill the entry; the iotlb table stores the bias to add to vaddr. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM: first write must go through the slow path so
               the dirty bits get set. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2254 9fa3e853 bellard
2255 0124311e bellard
#else
2256 0124311e bellard
2257 ee8b7021 bellard
/* User-mode build (CONFIG_USER_ONLY): there is no softmmu TLB, so
   flushing is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2260 0124311e bellard
2261 2e12669a bellard
/* User-mode build (CONFIG_USER_ONLY): no TLB, nothing to flush. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2264 0124311e bellard
2265 edf8e2af Mika Westerberg
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator for the walk: 'start'/'prot' describe the currently open
   region; start == -1ul means no region is open. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked per region */
    void *priv;                 /* opaque argument forwarded to fn */
    unsigned long start;
    int prot;
};
2277 5cd2c5b6 Richard Henderson
2278 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2279 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2280 5cd2c5b6 Richard Henderson
{
2281 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2282 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2283 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2284 5cd2c5b6 Richard Henderson
            return rc;
2285 5cd2c5b6 Richard Henderson
        }
2286 5cd2c5b6 Richard Henderson
    }
2287 5cd2c5b6 Richard Henderson
2288 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2289 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2290 5cd2c5b6 Richard Henderson
2291 5cd2c5b6 Richard Henderson
    return 0;
2292 5cd2c5b6 Richard Henderson
}
2293 5cd2c5b6 Richard Henderson
2294 5cd2c5b6 Richard Henderson
/* Recursively walk one level of the page-table radix tree.  'base' is
   the guest address covered by *lp; at level 0, *lp is a PageDesc
   array, otherwise an array of lower-level pointers.  Returns the
   first nonzero callback status, or 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Hole in the tree: close any open region at 'base'. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: emit the previous region. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* Each child at this level spans L2_BITS*level extra bits. */
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2331 5cd2c5b6 Richard Henderson
2332 5cd2c5b6 Richard Henderson
/* Walk the whole guest address space, calling 'fn(priv, start, end,
   prot)' for each maximal run of pages with identical protection.
   Returns the first nonzero callback status, or 0. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    unsigned long idx;
    struct walk_memory_regions_data data = {
        .fn = fn,
        .priv = priv,
        .start = -1ul,          /* no region open yet */
        .prot = 0,
    };

    for (idx = 0; idx < V_L1_SIZE; idx++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)idx << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1,
                                       l1_map + idx);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any. */
    return walk_memory_regions_end(&data, 0, 0);
}
2352 edf8e2af Mika Westerberg
2353 b480d9b7 Paul Brook
/* walk_memory_regions() callback: print one region line (range, size,
   rwx flags) to the FILE passed via 'priv'.  Always returns 0. */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;
    int r = (prot & PAGE_READ) ? 'r' : '-';
    int w = (prot & PAGE_WRITE) ? 'w' : '-';
    int x = (prot & PAGE_EXEC) ? 'x' : '-';

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start, r, w, x);

    return 0;
}
2367 edf8e2af Mika Westerberg
2368 edf8e2af Mika Westerberg
/* dump memory mappings */
void page_dump(FILE *f)
{
    /* Header line, then one line per region via dump_region. */
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
2375 33417e70 bellard
2376 53a5960a pbrook
/* Return the PAGE_* flags of the page containing 'address', or 0 when
   no descriptor exists for that page. */
int page_get_flags(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);

    return p ? p->flags : 0;
}
2385 9fa3e853 bellard
2386 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Record that the page was originally writable, so later
           write-protection for translated code can be undone. */
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2423 33417e70 bellard
2424 3d97b40b ths
/* Check that every page covering [start, start+len) carries all the
   protections requested in 'flags'.  For PAGE_WRITE, pages that were
   made read-only because they contain translated code are unprotected
   via page_unprotect().  Returns 0 on success, -1 on any failure. */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* Do NOT return success here: the previous code returned 0
               after checking only the first page, leaving the rest of a
               multi-page range unvalidated and still write-protected. */
        }
    }
    return 0;
}
2473 3d97b40b ths
2474 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled.
   'pc' and 'puc' are forwarded to tb_invalidate_phys_page() so a TB
   currently executing on the faulting page can be handled.  */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        /* Unknown page: the fault was not caused by our write
           protection, let the caller deal with it.  */
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* mprotect() works at host-page granularity, which may cover
           several target pages; restore write access to all of them.  */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;   /* accumulate flags for the whole host page */

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2521 9fa3e853 bellard
2522 6a00d601 bellard
/* User-mode emulation has no software TLB, so marking an address dirty
   is a no-op; this stub keeps the code shared with softmmu happy.  */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2526 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2527 9fa3e853 bellard
2528 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2529 8da3ff18 pbrook
2530 c04b2b78 Paul Brook
/* Index of 'addr' within its target page.  */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Descriptor for a target page whose bytes are split between several
   memory regions: per-byte I/O index and region offset tables.  */
typedef struct subpage_t {
    target_phys_addr_t base;                       /* page-aligned base address */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];     /* per-byte io_mem index */
    ram_addr_t region_offset[TARGET_PAGE_SIZE];    /* per-byte region offset */
} subpage_t;
2536 c04b2b78 Paul Brook
2537 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2538 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset);
2539 f6405247 Richard Henderson
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2540 f6405247 Richard Henderson
                                ram_addr_t orig_memory,
2541 f6405247 Richard Henderson
                                ram_addr_t region_offset);
2542 db7b5426 blueswir1
/* Compute which part [start_addr2, end_addr2] of the target page at
   'addr' is covered by a registration of [start_addr, start_addr +
   orig_size), and set 'need_subpage' to 1 when the registration does
   not span the full page (so a subpage_t is required).  NOTE: expands
   'orig_size' from the caller's scope; 'end_addr' is accepted for
   symmetry but unused in the body.  */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2561 db7b5426 blueswir1
2562 8f2498f9 Michael S. Tsirkin
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;   /* read implicitly by CHECK_SUBPAGE */
    subpage_t *subpage;

    /* Let registered memory clients (e.g. KVM) see the new mapping.  */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split it into a subpage
               if the new registration covers only part of it.  */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage; reuse its descriptor.  */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* For RAM/ROM(D) pages, consecutive pages map consecutive
                   ram offsets.  */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Previously unmapped page: allocate a descriptor.  */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2650 33417e70 bellard
2651 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2652 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2653 ba863458 bellard
{
2654 ba863458 bellard
    PhysPageDesc *p;
2655 ba863458 bellard
2656 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2657 ba863458 bellard
    if (!p)
2658 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2659 ba863458 bellard
    return p->phys_offset;
2660 ba863458 bellard
}
2661 ba863458 bellard
2662 c227f099 Anthony Liguori
/* Mark [addr, addr + size) as a coalesced MMIO region; only meaningful
   when running under KVM.  */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_coalesce_mmio_region(addr, size);
}
2667 f65ed4c1 aliguori
2668 c227f099 Anthony Liguori
/* Undo qemu_register_coalesced_mmio() for [addr, addr + size); no-op
   unless running under KVM.  */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_uncoalesce_mmio_region(addr, size);
}
2673 f65ed4c1 aliguori
2674 62a2744c Sheng Yang
/* Flush any buffered coalesced MMIO writes; no-op unless running
   under KVM.  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2679 62a2744c Sheng Yang
2680 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2681 c902760f Marcelo Tosatti
2682 c902760f Marcelo Tosatti
#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

/* Return the block size of the filesystem backing 'path' (the huge
   page size on a hugetlbfs mount), or 0 if statfs() fails.  Prints a
   warning when the path is not on hugetlbfs.  */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int err;

    /* Restart statfs() if it is interrupted by a signal.  */
    do {
            err = statfs(path, &fs);
    } while (err != 0 && errno == EINTR);

    if (err != 0) {
            perror(path);
            return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
            fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
2705 c902760f Marcelo Tosatti
2706 c902760f Marcelo Tosatti
/* Allocate 'memory' bytes of guest RAM backed by a (huge-page) file
   created under 'path'.  The backing file is unlinked immediately, so
   it lives only as long as the mapping.  Returns the mapped area or
   NULL on any failure (caller falls back to anonymous memory).  */
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* A region smaller than one huge page cannot be huge-page backed.  */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink right away: the file is kept alive by the open fd/mapping
       and disappears automatically when the process exits.  */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages.  */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    /* NOTE(review): on success 'fd' is never closed, so one descriptor
       stays open per allocation for the process lifetime — confirm
       whether close(fd) here would be safe (the mapping survives it).  */
    return area;
}
2771 c902760f Marcelo Tosatti
#endif
2772 c902760f Marcelo Tosatti
2773 c227f099 Anthony Liguori
/* Allocate a new block of guest RAM of 'size' bytes (rounded up to a
   target page) and register it in the global ram_blocks list.  Returns
   the block's ram offset, which callers use as a ram_addr_t handle.
   Also grows the dirty bitmap and marks the new range fully dirty.  */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        /* Try file-backed (hugetlbfs) allocation first; fall back to
           anonymous memory if it fails.  */
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host) {
            new_block->host = qemu_vmalloc(size);
#ifdef MADV_MERGEABLE
            madvise(new_block->host, size, MADV_MERGEABLE);
#endif
        }
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                                PROT_EXEC|PROT_READ|PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        /* Allow KSM to merge identical pages across guests.  */
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    /* Prepend to the MRU-ordered block list.  */
    new_block->next = ram_blocks;
    ram_blocks = new_block;

    /* Extend the dirty bitmap (one byte of flags per target page) and
       mark the new pages dirty so they are migrated/redrawn.  */
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2824 e9a1ab19 bellard
2825 c227f099 Anthony Liguori
/* Free guest RAM previously returned by qemu_ram_alloc().  Currently a
   no-op: RAM blocks are never reclaimed.  */
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
2829 e9a1ab19 bellard
2830 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2831 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2832 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
2833 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
2834 5579c7f3 pbrook

2835 5579c7f3 pbrook
   It should not be used for general purpose DMA.
2836 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2837 5579c7f3 pbrook
 */
2838 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
2839 dc828ca1 pbrook
{
2840 94a6b54f pbrook
    RAMBlock *prev;
2841 94a6b54f pbrook
    RAMBlock **prevp;
2842 94a6b54f pbrook
    RAMBlock *block;
2843 94a6b54f pbrook
2844 94a6b54f pbrook
    prev = NULL;
2845 94a6b54f pbrook
    prevp = &ram_blocks;
2846 94a6b54f pbrook
    block = ram_blocks;
2847 94a6b54f pbrook
    while (block && (block->offset > addr
2848 94a6b54f pbrook
                     || block->offset + block->length <= addr)) {
2849 94a6b54f pbrook
        if (prev)
2850 94a6b54f pbrook
          prevp = &prev->next;
2851 94a6b54f pbrook
        prev = block;
2852 94a6b54f pbrook
        block = block->next;
2853 94a6b54f pbrook
    }
2854 94a6b54f pbrook
    if (!block) {
2855 94a6b54f pbrook
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2856 94a6b54f pbrook
        abort();
2857 94a6b54f pbrook
    }
2858 94a6b54f pbrook
    /* Move this entry to to start of the list.  */
2859 94a6b54f pbrook
    if (prev) {
2860 94a6b54f pbrook
        prev->next = block->next;
2861 94a6b54f pbrook
        block->next = *prevp;
2862 94a6b54f pbrook
        *prevp = block;
2863 94a6b54f pbrook
    }
2864 94a6b54f pbrook
    return block->host + (addr - block->offset);
2865 dc828ca1 pbrook
}
2866 dc828ca1 pbrook
2867 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
2868 5579c7f3 pbrook
   (typically a TLB entry) back to a ram offset.  */
2869 c227f099 Anthony Liguori
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2870 5579c7f3 pbrook
{
2871 94a6b54f pbrook
    RAMBlock *block;
2872 94a6b54f pbrook
    uint8_t *host = ptr;
2873 94a6b54f pbrook
2874 94a6b54f pbrook
    block = ram_blocks;
2875 94a6b54f pbrook
    while (block && (block->host > host
2876 94a6b54f pbrook
                     || block->host + block->length <= host)) {
2877 94a6b54f pbrook
        block = block->next;
2878 94a6b54f pbrook
    }
2879 94a6b54f pbrook
    if (!block) {
2880 94a6b54f pbrook
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2881 94a6b54f pbrook
        abort();
2882 94a6b54f pbrook
    }
2883 94a6b54f pbrook
    return block->offset + (host - block->host);
2884 5579c7f3 pbrook
}
2885 5579c7f3 pbrook
2886 c227f099 Anthony Liguori
/* Byte read from an address with no device mapped: optionally logged,
   reported via do_unassigned_access() on SPARC/MicroBlaze, reads as 0.  */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
2896 e18231a3 blueswir1
2897 c227f099 Anthony Liguori
/* 16-bit read from an address with no device mapped: optionally logged,
   reported via do_unassigned_access() on SPARC/MicroBlaze, reads as 0.  */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2907 e18231a3 blueswir1
2908 c227f099 Anthony Liguori
/* 32-bit read from an address with no device mapped: optionally logged,
   reported via do_unassigned_access() on SPARC/MicroBlaze, reads as 0.  */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
2918 33417e70 bellard
2919 c227f099 Anthony Liguori
/* Byte write to an address with no device mapped: optionally logged,
   reported via do_unassigned_access() on SPARC/MicroBlaze, otherwise
   silently discarded.  */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
2928 e18231a3 blueswir1
2929 c227f099 Anthony Liguori
/* 16-bit write to an address with no device mapped: optionally logged,
   reported via do_unassigned_access() on SPARC/MicroBlaze, otherwise
   silently discarded.  */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2938 e18231a3 blueswir1
2939 c227f099 Anthony Liguori
/* 32-bit write to an address with no device mapped: optionally logged,
   reported via do_unassigned_access() on SPARC/MicroBlaze, otherwise
   silently discarded.  */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
2948 33417e70 bellard
2949 d60efc6b Blue Swirl
/* Read dispatch table for unassigned memory, indexed by log2 of the
   access size (0 = byte, 1 = 16-bit, 2 = 32-bit).  */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
2954 33417e70 bellard
2955 d60efc6b Blue Swirl
/* Write dispatch table for unassigned memory, indexed by log2 of the
   access size (0 = byte, 1 = 16-bit, 2 = 32-bit).  */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
2960 33417e70 bellard
2961 c227f099 Anthony Liguori
/* Byte-store handler for RAM pages that contain translated code: the
   affected TBs are invalidated before the store, then the page's dirty
   flags are updated; once fully dirty the slow path is disabled.  */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Invalidate TBs overlapping the store; may update the flags.  */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
2980 9fa3e853 bellard
2981 c227f099 Anthony Liguori
/* 16-bit store handler for RAM pages that contain translated code: the
   affected TBs are invalidated before the store, then the page's dirty
   flags are updated; once fully dirty the slow path is disabled.  */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Invalidate TBs overlapping the store; may update the flags.  */
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3000 9fa3e853 bellard
3001 c227f099 Anthony Liguori
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3002 0f459d16 pbrook
                                uint32_t val)
3003 9fa3e853 bellard
{
3004 3a7d929e bellard
    int dirty_flags;
3005 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3006 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3007 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3008 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
3009 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3010 9fa3e853 bellard
#endif
3011 3a7d929e bellard
    }
3012 5579c7f3 pbrook
    stl_p(qemu_get_ram_ptr(ram_addr), val);
3013 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3014 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3015 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3016 f23db169 bellard
       flushed */
3017 f23db169 bellard
    if (dirty_flags == 0xff)
3018 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3019 9fa3e853 bellard
}
3020 9fa3e853 bellard
3021 d60efc6b Blue Swirl
/* Placeholder read table for I/O slots whose reads never reach the
   dispatch path (used for IO_MEM_ROM and IO_MEM_NOTDIRTY in
   io_mem_init(), whose reads are served directly from RAM). */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
3026 9fa3e853 bellard
3027 d60efc6b Blue Swirl
/* Dispatch table for the IO_MEM_NOTDIRTY slot: byte (index 0),
   word (index 1) and long (index 2) write handlers. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3032 1ccde1cb bellard
3033 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    /* Called from the watch_mem_* handlers with:
     *   offset   - offset of the access within the faulting page,
     *   len_mask - mask selecting the access size (~0x0/~0x1/~0x3),
     *   flags    - BP_MEM_READ or BP_MEM_WRITE.
     * Operates on cpu_single_env, the CPU that performed the access. */
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access from the
       faulting page and the in-page offset. */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Match if the watchpoint covers this address and watches this
           kind of access (read/write). */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Resynchronize the CPU state to the faulting access,
                   then discard the TB so it gets retranslated. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Regenerate a single-instruction TB so the access
                       completes before the debug exception is taken. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                /* NOTE(review): cpu_resume_from_signal is expected not
                   to return to this frame — confirm. */
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3077 0f459d16 pbrook
3078 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
3079 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
3080 6658ffb8 pbrook
   phys routines.  */
3081 c227f099 Anthony Liguori
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3082 6658ffb8 pbrook
{
3083 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3084 6658ffb8 pbrook
    return ldub_phys(addr);
3085 6658ffb8 pbrook
}
3086 6658ffb8 pbrook
3087 c227f099 Anthony Liguori
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3088 6658ffb8 pbrook
{
3089 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3090 6658ffb8 pbrook
    return lduw_phys(addr);
3091 6658ffb8 pbrook
}
3092 6658ffb8 pbrook
3093 c227f099 Anthony Liguori
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3094 6658ffb8 pbrook
{
3095 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3096 6658ffb8 pbrook
    return ldl_phys(addr);
3097 6658ffb8 pbrook
}
3098 6658ffb8 pbrook
3099 c227f099 Anthony Liguori
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3100 6658ffb8 pbrook
                             uint32_t val)
3101 6658ffb8 pbrook
{
3102 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3103 6658ffb8 pbrook
    stb_phys(addr, val);
3104 6658ffb8 pbrook
}
3105 6658ffb8 pbrook
3106 c227f099 Anthony Liguori
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3107 6658ffb8 pbrook
                             uint32_t val)
3108 6658ffb8 pbrook
{
3109 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3110 6658ffb8 pbrook
    stw_phys(addr, val);
3111 6658ffb8 pbrook
}
3112 6658ffb8 pbrook
3113 c227f099 Anthony Liguori
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3114 6658ffb8 pbrook
                             uint32_t val)
3115 6658ffb8 pbrook
{
3116 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3117 6658ffb8 pbrook
    stl_phys(addr, val);
3118 6658ffb8 pbrook
}
3119 6658ffb8 pbrook
3120 d60efc6b Blue Swirl
/* Dispatch table for the watchpoint I/O slot: byte/word/long reads. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3125 6658ffb8 pbrook
3126 d60efc6b Blue Swirl
/* Dispatch table for the watchpoint I/O slot: byte/word/long writes. */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
3131 6658ffb8 pbrook
3132 f6405247 Richard Henderson
static inline uint32_t subpage_readlen (subpage_t *mmio,
3133 f6405247 Richard Henderson
                                        target_phys_addr_t addr,
3134 f6405247 Richard Henderson
                                        unsigned int len)
3135 db7b5426 blueswir1
{
3136 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3137 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3138 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3139 db7b5426 blueswir1
           mmio, len, addr, idx);
3140 db7b5426 blueswir1
#endif
3141 db7b5426 blueswir1
3142 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3143 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3144 f6405247 Richard Henderson
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3145 db7b5426 blueswir1
}
3146 db7b5426 blueswir1
3147 c227f099 Anthony Liguori
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3148 f6405247 Richard Henderson
                                     uint32_t value, unsigned int len)
3149 db7b5426 blueswir1
{
3150 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3151 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3152 f6405247 Richard Henderson
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3153 f6405247 Richard Henderson
           __func__, mmio, len, addr, idx, value);
3154 db7b5426 blueswir1
#endif
3155 f6405247 Richard Henderson
3156 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3157 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3158 f6405247 Richard Henderson
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3159 db7b5426 blueswir1
}
3160 db7b5426 blueswir1
3161 c227f099 Anthony Liguori
/* Fixed-size sub-page entry points: each forwards to
   subpage_readlen()/subpage_writelen() with the matching size index
   (0 = byte, 1 = word, 2 = long). */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3193 db7b5426 blueswir1
3194 d60efc6b Blue Swirl
/* Dispatch table for sub-page reads: byte/word/long. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3199 db7b5426 blueswir1
3200 d60efc6b Blue Swirl
/* Dispatch table for sub-page writes: byte/word/long. */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3205 db7b5426 blueswir1
3206 c227f099 Anthony Liguori
/* Point the sub-page cells covering [start, end] (offsets within the
   page) at I/O slot 'memory' with the given region offset.  Returns 0
   on success, -1 if either bound falls outside the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    /* NOTE(review): %ld vs ram_addr_t may warn on some hosts — confirm. */
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Strip the tag bits down to the bare I/O table index. */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    while (idx <= eidx) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
        idx++;
    }

    return 0;
}
3227 db7b5426 blueswir1
3228 f6405247 Richard Henderson
/* Allocate a sub-page container for the page at 'base', register it as
   an I/O region, store its handle (tagged IO_MEM_SUBPAGE) in *phys, and
   initially map the whole page onto the original backing memory. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *sub;
    int io_slot;

    sub = qemu_mallocz(sizeof(subpage_t));
    sub->base = base;
    io_slot = cpu_register_io_memory(subpage_read, subpage_write, sub);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           sub, base, TARGET_PAGE_SIZE, io_slot);
#endif
    *phys = io_slot | IO_MEM_SUBPAGE;
    subpage_register(sub, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
    return sub;
}
3248 db7b5426 blueswir1
3249 88715657 aliguori
static int get_free_io_mem_idx(void)
3250 88715657 aliguori
{
3251 88715657 aliguori
    int i;
3252 88715657 aliguori
3253 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3254 88715657 aliguori
        if (!io_mem_used[i]) {
3255 88715657 aliguori
            io_mem_used[i] = 1;
3256 88715657 aliguori
            return i;
3257 88715657 aliguori
        }
3258 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3259 88715657 aliguori
    return -1;
3260 88715657 aliguori
}
3261 88715657 aliguori
3262 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3263 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3264 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3265 3ee89922 blueswir1
   If io_index is non zero, the corresponding io zone is
3266 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3267 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3268 4254fab8 blueswir1
   returned if error. */
3269 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
3270 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
3271 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
3272 1eed09cb Avi Kivity
                                        void *opaque)
3273 33417e70 bellard
{
3274 3cab721d Richard Henderson
    int i;
3275 3cab721d Richard Henderson
3276 33417e70 bellard
    if (io_index <= 0) {
3277 88715657 aliguori
        io_index = get_free_io_mem_idx();
3278 88715657 aliguori
        if (io_index == -1)
3279 88715657 aliguori
            return io_index;
3280 33417e70 bellard
    } else {
3281 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
3282 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3283 33417e70 bellard
            return -1;
3284 33417e70 bellard
    }
3285 b5ff1b31 bellard
3286 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3287 3cab721d Richard Henderson
        io_mem_read[io_index][i]
3288 3cab721d Richard Henderson
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3289 3cab721d Richard Henderson
    }
3290 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3291 3cab721d Richard Henderson
        io_mem_write[io_index][i]
3292 3cab721d Richard Henderson
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3293 3cab721d Richard Henderson
    }
3294 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3295 f6405247 Richard Henderson
3296 f6405247 Richard Henderson
    return (io_index << IO_MEM_SHIFT);
3297 33417e70 bellard
}
3298 61382a50 bellard
3299 d60efc6b Blue Swirl
/* Public wrapper: allocate a new I/O memory slot (io_index 0 means
   "allocate") and register the given read/write handler tables. */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
3305 1eed09cb Avi Kivity
3306 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3307 88715657 aliguori
{
3308 88715657 aliguori
    int i;
3309 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3310 88715657 aliguori
3311 88715657 aliguori
    for (i=0;i < 3; i++) {
3312 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3313 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3314 88715657 aliguori
    }
3315 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3316 88715657 aliguori
    io_mem_used[io_index] = 0;
3317 88715657 aliguori
}
3318 88715657 aliguori
3319 e9179ce1 Avi Kivity
/* One-time setup of the I/O memory dispatch tables. */
static void io_mem_init(void)
{
    int i;

    /* Install the fixed slots: ROM (reads never go through the table,
       writes are discarded), unassigned memory, and the notdirty
       code-tracking slot. */
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* Reserve the low entries so get_free_io_mem_idx() never hands them
       out.  NOTE(review): 5 presumably covers all the fixed IO_MEM_*
       indices — confirm against the IO_MEM_* definitions. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    /* The watchpoint slot is dynamically allocated. */
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
3332 e9179ce1 Avi Kivity
3333 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3334 e2eef170 pbrook
3335 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3336 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3337 a68fe89c Paul Brook
/* User-mode debug accessor: copy 'len' bytes between 'buf' and guest
 * virtual memory at 'addr', honouring the page protection flags.
 * Returns 0 on success, -1 if any page in the range is invalid or lacks
 * the required PAGE_READ/PAGE_WRITE permission.  Note that a failure
 * part-way through leaves earlier chunks already copied. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current guest page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3375 8df1cd07 bellard
3376 13eb76e0 bellard
#else
3377 c227f099 Anthony Liguori
/* Copy 'len' bytes between 'buf' and guest physical memory at 'addr'.
 * RAM pages are accessed with memcpy (invalidating translated code on
 * write); anything else is routed through the registered I/O handlers,
 * splitting the transfer into naturally aligned 4/2/1-byte accesses. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O write: dispatch through the handler tables. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            /* Reads treat ROM and ROMD pages like RAM. */
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3473 8df1cd07 bellard
3474 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
3475 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3476 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3477 d0ecd2aa bellard
{
3478 d0ecd2aa bellard
    int l;
3479 d0ecd2aa bellard
    uint8_t *ptr;
3480 c227f099 Anthony Liguori
    target_phys_addr_t page;
3481 d0ecd2aa bellard
    unsigned long pd;
3482 d0ecd2aa bellard
    PhysPageDesc *p;
3483 3b46e624 ths
3484 d0ecd2aa bellard
    while (len > 0) {
3485 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3486 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3487 d0ecd2aa bellard
        if (l > len)
3488 d0ecd2aa bellard
            l = len;
3489 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3490 d0ecd2aa bellard
        if (!p) {
3491 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3492 d0ecd2aa bellard
        } else {
3493 d0ecd2aa bellard
            pd = p->phys_offset;
3494 d0ecd2aa bellard
        }
3495 3b46e624 ths
3496 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3497 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3498 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3499 d0ecd2aa bellard
            /* do nothing */
3500 d0ecd2aa bellard
        } else {
3501 d0ecd2aa bellard
            unsigned long addr1;
3502 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3503 d0ecd2aa bellard
            /* ROM/RAM case */
3504 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3505 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3506 d0ecd2aa bellard
        }
3507 d0ecd2aa bellard
        len -= l;
3508 d0ecd2aa bellard
        buf += l;
3509 d0ecd2aa bellard
        addr += l;
3510 d0ecd2aa bellard
    }
3511 d0ecd2aa bellard
}
3512 d0ecd2aa bellard
3513 6d16c2f8 aliguori
/* Temporary buffer used by cpu_physical_memory_map() when the target
   region is not directly addressable RAM. */
typedef struct {
    void *buffer;             /* host allocation backing the mapping */
    target_phys_addr_t addr;  /* guest physical address being mapped */
    target_phys_addr_t len;   /* length of the mapped region */
} BounceBuffer;

/* Single global bounce buffer: at most one such mapping can be live
   (cpu_physical_memory_map bails out if bounce.buffer is in use). */
static BounceBuffer bounce;
3520 6d16c2f8 aliguori
3521 ba223c29 aliguori
/* A client waiting to be told when map resources become available
   again (see cpu_register_map_client / cpu_notify_map_clients). */
typedef struct MapClient {
    void *opaque;                    /* passed back to the callback */
    void (*callback)(void *opaque);  /* invoked on notification */
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* All currently registered map clients. */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3529 ba223c29 aliguori
3530 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3531 ba223c29 aliguori
{
3532 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3533 ba223c29 aliguori
3534 ba223c29 aliguori
    client->opaque = opaque;
3535 ba223c29 aliguori
    client->callback = callback;
3536 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3537 ba223c29 aliguori
    return client;
3538 ba223c29 aliguori
}
3539 ba223c29 aliguori
3540 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3541 ba223c29 aliguori
{
3542 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3543 ba223c29 aliguori
3544 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3545 34d5e948 Isaku Yamahata
    qemu_free(client);
3546 ba223c29 aliguori
}
3547 ba223c29 aliguori
3548 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3549 ba223c29 aliguori
{
3550 ba223c29 aliguori
    MapClient *client;
3551 ba223c29 aliguori
3552 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3553 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3554 ba223c29 aliguori
        client->callback(client->opaque);
3555 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3556 ba223c29 aliguori
    }
3557 ba223c29 aliguori
}
3558 ba223c29 aliguori
3559 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3560 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3561 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3562 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3563 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
3564 ba223c29 aliguori
 * likely to succeed.
3565 6d16c2f8 aliguori
 */
3566 c227f099 Anthony Liguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
3567 c227f099 Anthony Liguori
                              target_phys_addr_t *plen,
3568 6d16c2f8 aliguori
                              int is_write)
3569 6d16c2f8 aliguori
{
3570 c227f099 Anthony Liguori
    target_phys_addr_t len = *plen;
3571 c227f099 Anthony Liguori
    target_phys_addr_t done = 0;
3572 6d16c2f8 aliguori
    int l;
3573 6d16c2f8 aliguori
    uint8_t *ret = NULL;
3574 6d16c2f8 aliguori
    uint8_t *ptr;
3575 c227f099 Anthony Liguori
    target_phys_addr_t page;
3576 6d16c2f8 aliguori
    unsigned long pd;
3577 6d16c2f8 aliguori
    PhysPageDesc *p;
3578 6d16c2f8 aliguori
    unsigned long addr1;
3579 6d16c2f8 aliguori
3580 6d16c2f8 aliguori
    while (len > 0) {
3581 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
3582 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
3583 6d16c2f8 aliguori
        if (l > len)
3584 6d16c2f8 aliguori
            l = len;
3585 6d16c2f8 aliguori
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3586 6d16c2f8 aliguori
        if (!p) {
3587 6d16c2f8 aliguori
            pd = IO_MEM_UNASSIGNED;
3588 6d16c2f8 aliguori
        } else {
3589 6d16c2f8 aliguori
            pd = p->phys_offset;
3590 6d16c2f8 aliguori
        }
3591 6d16c2f8 aliguori
3592 6d16c2f8 aliguori
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3593 6d16c2f8 aliguori
            if (done || bounce.buffer) {
3594 6d16c2f8 aliguori
                break;
3595 6d16c2f8 aliguori
            }
3596 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3597 6d16c2f8 aliguori
            bounce.addr = addr;
3598 6d16c2f8 aliguori
            bounce.len = l;
3599 6d16c2f8 aliguori
            if (!is_write) {
3600 6d16c2f8 aliguori
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3601 6d16c2f8 aliguori
            }
3602 6d16c2f8 aliguori
            ptr = bounce.buffer;
3603 6d16c2f8 aliguori
        } else {
3604 6d16c2f8 aliguori
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3605 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3606 6d16c2f8 aliguori
        }
3607 6d16c2f8 aliguori
        if (!done) {
3608 6d16c2f8 aliguori
            ret = ptr;
3609 6d16c2f8 aliguori
        } else if (ret + done != ptr) {
3610 6d16c2f8 aliguori
            break;
3611 6d16c2f8 aliguori
        }
3612 6d16c2f8 aliguori
3613 6d16c2f8 aliguori
        len -= l;
3614 6d16c2f8 aliguori
        addr += l;
3615 6d16c2f8 aliguori
        done += l;
3616 6d16c2f8 aliguori
    }
3617 6d16c2f8 aliguori
    *plen = done;
3618 6d16c2f8 aliguori
    return ret;
3619 6d16c2f8 aliguori
}
3620 6d16c2f8 aliguori
3621 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3622 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3623 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
3624 6d16c2f8 aliguori
 */
3625 c227f099 Anthony Liguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3626 c227f099 Anthony Liguori
                               int is_write, target_phys_addr_t access_len)
3627 6d16c2f8 aliguori
{
3628 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
3629 6d16c2f8 aliguori
        if (is_write) {
3630 c227f099 Anthony Liguori
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3631 6d16c2f8 aliguori
            while (access_len) {
3632 6d16c2f8 aliguori
                unsigned l;
3633 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
3634 6d16c2f8 aliguori
                if (l > access_len)
3635 6d16c2f8 aliguori
                    l = access_len;
3636 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
3637 6d16c2f8 aliguori
                    /* invalidate code */
3638 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3639 6d16c2f8 aliguori
                    /* set dirty bit */
3640 f7c11b53 Yoshiaki Tamura
                    cpu_physical_memory_set_dirty_flags(
3641 f7c11b53 Yoshiaki Tamura
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
3642 6d16c2f8 aliguori
                }
3643 6d16c2f8 aliguori
                addr1 += l;
3644 6d16c2f8 aliguori
                access_len -= l;
3645 6d16c2f8 aliguori
            }
3646 6d16c2f8 aliguori
        }
3647 6d16c2f8 aliguori
        return;
3648 6d16c2f8 aliguori
    }
3649 6d16c2f8 aliguori
    if (is_write) {
3650 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3651 6d16c2f8 aliguori
    }
3652 f8a83245 Herve Poussineau
    qemu_vfree(bounce.buffer);
3653 6d16c2f8 aliguori
    bounce.buffer = NULL;
3654 ba223c29 aliguori
    cpu_notify_map_clients();
3655 6d16c2f8 aliguori
}
3656 d0ecd2aa bellard
3657 8df1cd07 bellard
/* warning: addr must be aligned */
3658 c227f099 Anthony Liguori
uint32_t ldl_phys(target_phys_addr_t addr)
3659 8df1cd07 bellard
{
3660 8df1cd07 bellard
    int io_index;
3661 8df1cd07 bellard
    uint8_t *ptr;
3662 8df1cd07 bellard
    uint32_t val;
3663 8df1cd07 bellard
    unsigned long pd;
3664 8df1cd07 bellard
    PhysPageDesc *p;
3665 8df1cd07 bellard
3666 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3667 8df1cd07 bellard
    if (!p) {
3668 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3669 8df1cd07 bellard
    } else {
3670 8df1cd07 bellard
        pd = p->phys_offset;
3671 8df1cd07 bellard
    }
3672 3b46e624 ths
3673 5fafdf24 ths
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3674 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3675 8df1cd07 bellard
        /* I/O case */
3676 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3677 8da3ff18 pbrook
        if (p)
3678 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3679 8df1cd07 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3680 8df1cd07 bellard
    } else {
3681 8df1cd07 bellard
        /* RAM case */
3682 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3683 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
3684 8df1cd07 bellard
        val = ldl_p(ptr);
3685 8df1cd07 bellard
    }
3686 8df1cd07 bellard
    return val;
3687 8df1cd07 bellard
}
3688 8df1cd07 bellard
3689 84b7b8e7 bellard
/* warning: addr must be aligned */
3690 c227f099 Anthony Liguori
uint64_t ldq_phys(target_phys_addr_t addr)
3691 84b7b8e7 bellard
{
3692 84b7b8e7 bellard
    int io_index;
3693 84b7b8e7 bellard
    uint8_t *ptr;
3694 84b7b8e7 bellard
    uint64_t val;
3695 84b7b8e7 bellard
    unsigned long pd;
3696 84b7b8e7 bellard
    PhysPageDesc *p;
3697 84b7b8e7 bellard
3698 84b7b8e7 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3699 84b7b8e7 bellard
    if (!p) {
3700 84b7b8e7 bellard
        pd = IO_MEM_UNASSIGNED;
3701 84b7b8e7 bellard
    } else {
3702 84b7b8e7 bellard
        pd = p->phys_offset;
3703 84b7b8e7 bellard
    }
3704 3b46e624 ths
3705 2a4188a3 bellard
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3706 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3707 84b7b8e7 bellard
        /* I/O case */
3708 84b7b8e7 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3709 8da3ff18 pbrook
        if (p)
3710 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3711 84b7b8e7 bellard
#ifdef TARGET_WORDS_BIGENDIAN
3712 84b7b8e7 bellard
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3713 84b7b8e7 bellard
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3714 84b7b8e7 bellard
#else
3715 84b7b8e7 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3716 84b7b8e7 bellard
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3717 84b7b8e7 bellard
#endif
3718 84b7b8e7 bellard
    } else {
3719 84b7b8e7 bellard
        /* RAM case */
3720 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3721 84b7b8e7 bellard
            (addr & ~TARGET_PAGE_MASK);
3722 84b7b8e7 bellard
        val = ldq_p(ptr);
3723 84b7b8e7 bellard
    }
3724 84b7b8e7 bellard
    return val;
3725 84b7b8e7 bellard
}
3726 84b7b8e7 bellard
3727 aab33094 bellard
/* XXX: optimize */
3728 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
3729 aab33094 bellard
{
3730 aab33094 bellard
    uint8_t val;
3731 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3732 aab33094 bellard
    return val;
3733 aab33094 bellard
}
3734 aab33094 bellard
3735 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
3736 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
3737 aab33094 bellard
{
3738 733f0b02 Michael S. Tsirkin
    int io_index;
3739 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
3740 733f0b02 Michael S. Tsirkin
    uint64_t val;
3741 733f0b02 Michael S. Tsirkin
    unsigned long pd;
3742 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
3743 733f0b02 Michael S. Tsirkin
3744 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3745 733f0b02 Michael S. Tsirkin
    if (!p) {
3746 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
3747 733f0b02 Michael S. Tsirkin
    } else {
3748 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
3749 733f0b02 Michael S. Tsirkin
    }
3750 733f0b02 Michael S. Tsirkin
3751 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3752 733f0b02 Michael S. Tsirkin
        !(pd & IO_MEM_ROMD)) {
3753 733f0b02 Michael S. Tsirkin
        /* I/O case */
3754 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3755 733f0b02 Michael S. Tsirkin
        if (p)
3756 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3757 733f0b02 Michael S. Tsirkin
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3758 733f0b02 Michael S. Tsirkin
    } else {
3759 733f0b02 Michael S. Tsirkin
        /* RAM case */
3760 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3761 733f0b02 Michael S. Tsirkin
            (addr & ~TARGET_PAGE_MASK);
3762 733f0b02 Michael S. Tsirkin
        val = lduw_p(ptr);
3763 733f0b02 Michael S. Tsirkin
    }
3764 733f0b02 Michael S. Tsirkin
    return val;
3765 aab33094 bellard
}
3766 aab33094 bellard
3767 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
3768 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
3769 8df1cd07 bellard
   bits are used to track modified PTEs */
3770 c227f099 Anthony Liguori
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3771 8df1cd07 bellard
{
3772 8df1cd07 bellard
    int io_index;
3773 8df1cd07 bellard
    uint8_t *ptr;
3774 8df1cd07 bellard
    unsigned long pd;
3775 8df1cd07 bellard
    PhysPageDesc *p;
3776 8df1cd07 bellard
3777 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3778 8df1cd07 bellard
    if (!p) {
3779 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3780 8df1cd07 bellard
    } else {
3781 8df1cd07 bellard
        pd = p->phys_offset;
3782 8df1cd07 bellard
    }
3783 3b46e624 ths
3784 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3785 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3786 8da3ff18 pbrook
        if (p)
3787 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3788 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3789 8df1cd07 bellard
    } else {
3790 74576198 aliguori
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3791 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3792 8df1cd07 bellard
        stl_p(ptr, val);
3793 74576198 aliguori
3794 74576198 aliguori
        if (unlikely(in_migration)) {
3795 74576198 aliguori
            if (!cpu_physical_memory_is_dirty(addr1)) {
3796 74576198 aliguori
                /* invalidate code */
3797 74576198 aliguori
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3798 74576198 aliguori
                /* set dirty bit */
3799 f7c11b53 Yoshiaki Tamura
                cpu_physical_memory_set_dirty_flags(
3800 f7c11b53 Yoshiaki Tamura
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
3801 74576198 aliguori
            }
3802 74576198 aliguori
        }
3803 8df1cd07 bellard
    }
3804 8df1cd07 bellard
}
3805 8df1cd07 bellard
3806 c227f099 Anthony Liguori
/* 64-bit variant of stl_phys_notdirty: the RAM page is not marked dirty
   and translated code in it is not invalidated. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
    unsigned long pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        int io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);

        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* Split into two 32-bit writes, most-significant half first on
           big-endian targets. */
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        uint8_t *ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3837 bc98a7ef j_mayer
3838 8df1cd07 bellard
/* warning: addr must be aligned */
3839 c227f099 Anthony Liguori
void stl_phys(target_phys_addr_t addr, uint32_t val)
3840 8df1cd07 bellard
{
3841 8df1cd07 bellard
    int io_index;
3842 8df1cd07 bellard
    uint8_t *ptr;
3843 8df1cd07 bellard
    unsigned long pd;
3844 8df1cd07 bellard
    PhysPageDesc *p;
3845 8df1cd07 bellard
3846 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3847 8df1cd07 bellard
    if (!p) {
3848 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3849 8df1cd07 bellard
    } else {
3850 8df1cd07 bellard
        pd = p->phys_offset;
3851 8df1cd07 bellard
    }
3852 3b46e624 ths
3853 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3854 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3855 8da3ff18 pbrook
        if (p)
3856 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3857 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3858 8df1cd07 bellard
    } else {
3859 8df1cd07 bellard
        unsigned long addr1;
3860 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3861 8df1cd07 bellard
        /* RAM case */
3862 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3863 8df1cd07 bellard
        stl_p(ptr, val);
3864 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
3865 3a7d929e bellard
            /* invalidate code */
3866 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3867 3a7d929e bellard
            /* set dirty bit */
3868 f7c11b53 Yoshiaki Tamura
            cpu_physical_memory_set_dirty_flags(addr1,
3869 f7c11b53 Yoshiaki Tamura
                (0xff & ~CODE_DIRTY_FLAG));
3870 3a7d929e bellard
        }
3871 8df1cd07 bellard
    }
3872 8df1cd07 bellard
}
3873 8df1cd07 bellard
3874 aab33094 bellard
/* XXX: optimize */
3875 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
3876 aab33094 bellard
{
3877 aab33094 bellard
    uint8_t v = val;
3878 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3879 aab33094 bellard
}
3880 aab33094 bellard
3881 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
3882 c227f099 Anthony Liguori
void stw_phys(target_phys_addr_t addr, uint32_t val)
3883 aab33094 bellard
{
3884 733f0b02 Michael S. Tsirkin
    int io_index;
3885 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
3886 733f0b02 Michael S. Tsirkin
    unsigned long pd;
3887 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
3888 733f0b02 Michael S. Tsirkin
3889 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3890 733f0b02 Michael S. Tsirkin
    if (!p) {
3891 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
3892 733f0b02 Michael S. Tsirkin
    } else {
3893 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
3894 733f0b02 Michael S. Tsirkin
    }
3895 733f0b02 Michael S. Tsirkin
3896 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3897 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3898 733f0b02 Michael S. Tsirkin
        if (p)
3899 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3900 733f0b02 Michael S. Tsirkin
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3901 733f0b02 Michael S. Tsirkin
    } else {
3902 733f0b02 Michael S. Tsirkin
        unsigned long addr1;
3903 733f0b02 Michael S. Tsirkin
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3904 733f0b02 Michael S. Tsirkin
        /* RAM case */
3905 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(addr1);
3906 733f0b02 Michael S. Tsirkin
        stw_p(ptr, val);
3907 733f0b02 Michael S. Tsirkin
        if (!cpu_physical_memory_is_dirty(addr1)) {
3908 733f0b02 Michael S. Tsirkin
            /* invalidate code */
3909 733f0b02 Michael S. Tsirkin
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3910 733f0b02 Michael S. Tsirkin
            /* set dirty bit */
3911 733f0b02 Michael S. Tsirkin
            cpu_physical_memory_set_dirty_flags(addr1,
3912 733f0b02 Michael S. Tsirkin
                (0xff & ~CODE_DIRTY_FLAG));
3913 733f0b02 Michael S. Tsirkin
        }
3914 733f0b02 Michael S. Tsirkin
    }
3915 aab33094 bellard
}
3916 aab33094 bellard
3917 aab33094 bellard
/* XXX: optimize */
3918 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
3919 aab33094 bellard
{
3920 aab33094 bellard
    val = tswap64(val);
3921 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3922 aab33094 bellard
}
3923 aab33094 bellard
3924 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
3925 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3926 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
3927 13eb76e0 bellard
{
3928 13eb76e0 bellard
    int l;
3929 c227f099 Anthony Liguori
    target_phys_addr_t phys_addr;
3930 9b3c35e0 j_mayer
    target_ulong page;
3931 13eb76e0 bellard
3932 13eb76e0 bellard
    while (len > 0) {
3933 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3934 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
3935 13eb76e0 bellard
        /* if no physical page mapped, return an error */
3936 13eb76e0 bellard
        if (phys_addr == -1)
3937 13eb76e0 bellard
            return -1;
3938 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3939 13eb76e0 bellard
        if (l > len)
3940 13eb76e0 bellard
            l = len;
3941 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
3942 5e2972fd aliguori
        if (is_write)
3943 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
3944 5e2972fd aliguori
        else
3945 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3946 13eb76e0 bellard
        len -= l;
3947 13eb76e0 bellard
        buf += l;
3948 13eb76e0 bellard
        addr += l;
3949 13eb76e0 bellard
    }
3950 13eb76e0 bellard
    return 0;
3951 13eb76e0 bellard
}
3952 a68fe89c Paul Brook
#endif
3953 13eb76e0 bellard
3954 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB.  This is called from the I/O path to
   cut the current TB short at the faulting instruction and retranslate
   it so that the I/O insn terminates the block. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Locate the TB that contains the host return address of the
       I/O access. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* Restore CPU state to the point of the faulting instruction. */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Retranslate with an instruction-count limit so the I/O insn is the
       last one in the new TB. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    /* Throw the old translation away before regenerating it. */
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
4012 2e70f6ef pbrook
4013 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
4014 b3755a91 Paul Brook
4015 e3db7226 bellard
void dump_exec_info(FILE *f,
4016 e3db7226 bellard
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4017 e3db7226 bellard
{
4018 e3db7226 bellard
    int i, target_code_size, max_target_code_size;
4019 e3db7226 bellard
    int direct_jmp_count, direct_jmp2_count, cross_page;
4020 e3db7226 bellard
    TranslationBlock *tb;
4021 3b46e624 ths
4022 e3db7226 bellard
    target_code_size = 0;
4023 e3db7226 bellard
    max_target_code_size = 0;
4024 e3db7226 bellard
    cross_page = 0;
4025 e3db7226 bellard
    direct_jmp_count = 0;
4026 e3db7226 bellard
    direct_jmp2_count = 0;
4027 e3db7226 bellard
    for(i = 0; i < nb_tbs; i++) {
4028 e3db7226 bellard
        tb = &tbs[i];
4029 e3db7226 bellard
        target_code_size += tb->size;
4030 e3db7226 bellard
        if (tb->size > max_target_code_size)
4031 e3db7226 bellard
            max_target_code_size = tb->size;
4032 e3db7226 bellard
        if (tb->page_addr[1] != -1)
4033 e3db7226 bellard
            cross_page++;
4034 e3db7226 bellard
        if (tb->tb_next_offset[0] != 0xffff) {
4035 e3db7226 bellard
            direct_jmp_count++;
4036 e3db7226 bellard
            if (tb->tb_next_offset[1] != 0xffff) {
4037 e3db7226 bellard
                direct_jmp2_count++;
4038 e3db7226 bellard
            }
4039 e3db7226 bellard
        }
4040 e3db7226 bellard
    }
4041 e3db7226 bellard
    /* XXX: avoid using doubles ? */
4042 57fec1fe bellard
    cpu_fprintf(f, "Translation buffer state:\n");
4043 26a5f13b bellard
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
4044 26a5f13b bellard
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4045 26a5f13b bellard
    cpu_fprintf(f, "TB count            %d/%d\n", 
4046 26a5f13b bellard
                nb_tbs, code_gen_max_blocks);
4047 5fafdf24 ths
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
4048 e3db7226 bellard
                nb_tbs ? target_code_size / nb_tbs : 0,
4049 e3db7226 bellard
                max_target_code_size);
4050 5fafdf24 ths
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
4051 e3db7226 bellard
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4052 e3db7226 bellard
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4053 5fafdf24 ths
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4054 5fafdf24 ths
            cross_page,
4055 e3db7226 bellard
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4056 e3db7226 bellard
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
4057 5fafdf24 ths
                direct_jmp_count,
4058 e3db7226 bellard
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4059 e3db7226 bellard
                direct_jmp2_count,
4060 e3db7226 bellard
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4061 57fec1fe bellard
    cpu_fprintf(f, "\nStatistics:\n");
4062 e3db7226 bellard
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
4063 e3db7226 bellard
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4064 e3db7226 bellard
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
4065 b67d9a52 bellard
    tcg_dump_info(f, cpu_fprintf);
4066 e3db7226 bellard
}
4067 e3db7226 bellard
4068 61382a50 bellard
#define MMUSUFFIX _cmmu
4069 61382a50 bellard
#define GETPC() NULL
4070 61382a50 bellard
#define env cpu_single_env
4071 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
4072 61382a50 bellard
4073 61382a50 bellard
#define SHIFT 0
4074 61382a50 bellard
#include "softmmu_template.h"
4075 61382a50 bellard
4076 61382a50 bellard
#define SHIFT 1
4077 61382a50 bellard
#include "softmmu_template.h"
4078 61382a50 bellard
4079 61382a50 bellard
#define SHIFT 2
4080 61382a50 bellard
#include "softmmu_template.h"
4081 61382a50 bellard
4082 61382a50 bellard
#define SHIFT 3
4083 61382a50 bellard
#include "softmmu_template.h"
4084 61382a50 bellard
4085 61382a50 bellard
#undef env
4086 61382a50 bellard
4087 61382a50 bellard
#endif