Statistics
| Branch: | Revision:

root / exec.c @ eabba580

History | View | Annotate | Download (126 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
27 055403b2 Stefan Weil
#include "qemu-common.h"
28 6180a181 bellard
#include "cpu.h"
29 6180a181 bellard
#include "exec-all.h"
30 b67d9a52 bellard
#include "tcg.h"
31 b3c7724c pbrook
#include "hw/hw.h"
32 cc9e98cb Alex Williamson
#include "hw/qdev.h"
33 74576198 aliguori
#include "osdep.h"
34 7ba1e619 aliguori
#include "kvm.h"
35 29e922b6 Blue Swirl
#include "qemu-timer.h"
36 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
37 53a5960a pbrook
#include <qemu.h>
38 fd052bf6 Riku Voipio
#include <signal.h>
39 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40 f01576f1 Juergen Lock
#include <sys/param.h>
41 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
42 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
43 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
44 f01576f1 Juergen Lock
#include <sys/time.h>
45 f01576f1 Juergen Lock
#include <sys/proc.h>
46 f01576f1 Juergen Lock
#include <machine/profile.h>
47 f01576f1 Juergen Lock
#define _KERNEL
48 f01576f1 Juergen Lock
#include <sys/user.h>
49 f01576f1 Juergen Lock
#undef _KERNEL
50 f01576f1 Juergen Lock
#undef sigqueue
51 f01576f1 Juergen Lock
#include <libutil.h>
52 f01576f1 Juergen Lock
#endif
53 f01576f1 Juergen Lock
#endif
54 53a5960a pbrook
#endif
55 54936004 bellard
56 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
57 66e85a21 bellard
//#define DEBUG_FLUSH
58 9fa3e853 bellard
//#define DEBUG_TLB
59 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
60 fd6ce8f6 bellard
61 fd6ce8f6 bellard
/* make various TB consistency checks */
62 5fafdf24 ths
//#define DEBUG_TB_CHECK
63 5fafdf24 ths
//#define DEBUG_TLB_CHECK
64 fd6ce8f6 bellard
65 1196be37 ths
//#define DEBUG_IOPORT
66 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
67 1196be37 ths
68 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
69 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
70 99773bd4 pbrook
#undef DEBUG_TB_CHECK
71 99773bd4 pbrook
#endif
72 99773bd4 pbrook
73 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
74 9fa3e853 bellard
75 bdaf78e0 blueswir1
static TranslationBlock *tbs;
76 24ab68ac Stefan Weil
static int code_gen_max_blocks;
77 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
78 bdaf78e0 blueswir1
static int nb_tbs;
79 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
80 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
81 fd6ce8f6 bellard
82 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
83 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
84 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
85 d03d860b blueswir1
 section close to code segment. */
86 d03d860b blueswir1
#define code_gen_section                                \
87 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
88 d03d860b blueswir1
    __attribute__((aligned (32)))
89 f8e2af11 Stefan Weil
#elif defined(_WIN32)
90 f8e2af11 Stefan Weil
/* Maximum alignment for Win32 is 16. */
91 f8e2af11 Stefan Weil
#define code_gen_section                                \
92 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
93 d03d860b blueswir1
#else
94 d03d860b blueswir1
#define code_gen_section                                \
95 d03d860b blueswir1
    __attribute__((aligned (32)))
96 d03d860b blueswir1
#endif
97 d03d860b blueswir1
98 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
99 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
100 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
101 26a5f13b bellard
/* threshold to flush the translated code buffer */
102 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
103 24ab68ac Stefan Weil
static uint8_t *code_gen_ptr;
104 fd6ce8f6 bellard
105 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
106 9fa3e853 bellard
int phys_ram_fd;
107 74576198 aliguori
static int in_migration;
108 94a6b54f pbrook
109 f471a17e Alex Williamson
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
110 e2eef170 pbrook
#endif
111 9fa3e853 bellard
112 6a00d601 bellard
CPUState *first_cpu;
113 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
114 6a00d601 bellard
   cpu_exec() */
115 5fafdf24 ths
CPUState *cpu_single_env;
116 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
117 bf20dc07 ths
   1 = Precise instruction counting.
118 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
119 2e70f6ef pbrook
int use_icount = 0;
120 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
121 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
122 2e70f6ef pbrook
int64_t qemu_icount;
123 6a00d601 bellard
124 54936004 bellard
typedef struct PageDesc {
125 92e873b9 bellard
    /* list of TBs intersecting this ram page */
126 fd6ce8f6 bellard
    TranslationBlock *first_tb;
127 9fa3e853 bellard
    /* in order to optimize self modifying code, we count the number
128 9fa3e853 bellard
       of lookups we do to a given page to use a bitmap */
129 9fa3e853 bellard
    unsigned int code_write_count;
130 9fa3e853 bellard
    uint8_t *code_bitmap;
131 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
132 9fa3e853 bellard
    unsigned long flags;
133 9fa3e853 bellard
#endif
134 54936004 bellard
} PageDesc;
135 54936004 bellard
136 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
137 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
138 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
139 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
140 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
141 41c1b1c9 Paul Brook
#else
142 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
143 41c1b1c9 Paul Brook
#endif
144 bedb69ea j_mayer
#else
145 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
146 bedb69ea j_mayer
#endif
147 54936004 bellard
148 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
149 5cd2c5b6 Richard Henderson
#define L2_BITS 10
150 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
151 54936004 bellard
152 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
153 5cd2c5b6 Richard Henderson
#define P_L1_BITS_REM \
154 5cd2c5b6 Richard Henderson
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
155 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
156 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157 5cd2c5b6 Richard Henderson
158 5cd2c5b6 Richard Henderson
/* Size of the L1 page table.  Avoid silly small sizes.  */
159 5cd2c5b6 Richard Henderson
#if P_L1_BITS_REM < 4
160 5cd2c5b6 Richard Henderson
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
161 5cd2c5b6 Richard Henderson
#else
162 5cd2c5b6 Richard Henderson
#define P_L1_BITS  P_L1_BITS_REM
163 5cd2c5b6 Richard Henderson
#endif
164 5cd2c5b6 Richard Henderson
165 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
166 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
167 5cd2c5b6 Richard Henderson
#else
168 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
169 5cd2c5b6 Richard Henderson
#endif
170 5cd2c5b6 Richard Henderson
171 5cd2c5b6 Richard Henderson
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
172 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
173 5cd2c5b6 Richard Henderson
174 5cd2c5b6 Richard Henderson
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
175 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
176 5cd2c5b6 Richard Henderson
177 83fb7adf bellard
unsigned long qemu_real_host_page_size;
178 83fb7adf bellard
unsigned long qemu_host_page_bits;
179 83fb7adf bellard
unsigned long qemu_host_page_size;
180 83fb7adf bellard
unsigned long qemu_host_page_mask;
181 54936004 bellard
182 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
183 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
184 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
185 54936004 bellard
186 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
187 41c1b1c9 Paul Brook
typedef struct PhysPageDesc {
188 41c1b1c9 Paul Brook
    /* offset in host memory of the page + io_index in the low bits */
189 41c1b1c9 Paul Brook
    ram_addr_t phys_offset;
190 41c1b1c9 Paul Brook
    ram_addr_t region_offset;
191 41c1b1c9 Paul Brook
} PhysPageDesc;
192 41c1b1c9 Paul Brook
193 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
194 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
195 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
196 6d9a1304 Paul Brook
197 e2eef170 pbrook
static void io_mem_init(void);
198 e2eef170 pbrook
199 33417e70 bellard
/* io memory support */
200 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
201 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
202 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
203 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
204 6658ffb8 pbrook
static int io_mem_watch;
205 6658ffb8 pbrook
#endif
206 33417e70 bellard
207 34865134 bellard
/* log support */
208 1e8b27ca Juha Riihimäki
#ifdef WIN32
209 1e8b27ca Juha Riihimäki
static const char *logfilename = "qemu.log";
210 1e8b27ca Juha Riihimäki
#else
211 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
212 1e8b27ca Juha Riihimäki
#endif
213 34865134 bellard
FILE *logfile;
214 34865134 bellard
int loglevel;
215 e735b91c pbrook
static int log_append = 0;
216 34865134 bellard
217 e3db7226 bellard
/* statistics */
218 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
219 e3db7226 bellard
static int tlb_flush_count;
220 b3755a91 Paul Brook
#endif
221 e3db7226 bellard
static int tb_flush_count;
222 e3db7226 bellard
static int tb_phys_invalidate_count;
223 e3db7226 bellard
224 7cb69cae bellard
#ifdef _WIN32
225 7cb69cae bellard
static void map_exec(void *addr, long size)
226 7cb69cae bellard
{
227 7cb69cae bellard
    DWORD old_protect;
228 7cb69cae bellard
    VirtualProtect(addr, size,
229 7cb69cae bellard
                   PAGE_EXECUTE_READWRITE, &old_protect);
230 7cb69cae bellard
    
231 7cb69cae bellard
}
232 7cb69cae bellard
#else
233 7cb69cae bellard
static void map_exec(void *addr, long size)
234 7cb69cae bellard
{
235 4369415f bellard
    unsigned long start, end, page_size;
236 7cb69cae bellard
    
237 4369415f bellard
    page_size = getpagesize();
238 7cb69cae bellard
    start = (unsigned long)addr;
239 4369415f bellard
    start &= ~(page_size - 1);
240 7cb69cae bellard
    
241 7cb69cae bellard
    end = (unsigned long)addr + size;
242 4369415f bellard
    end += page_size - 1;
243 4369415f bellard
    end &= ~(page_size - 1);
244 7cb69cae bellard
    
245 7cb69cae bellard
    mprotect((void *)start, end - start,
246 7cb69cae bellard
             PROT_READ | PROT_WRITE | PROT_EXEC);
247 7cb69cae bellard
}
248 7cb69cae bellard
#endif
249 7cb69cae bellard
250 b346ff46 bellard
/* One-time host page-size discovery.  Fills in qemu_real_host_page_size,
   qemu_host_page_size/bits/mask; on BSD user-mode builds it additionally
   walks the host address map and marks already-occupied ranges as
   PAGE_RESERVED so guest mappings avoid them. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been preset elsewhere; only default it.
       It must be at least TARGET_PAGE_SIZE for the page tables to work. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* Derive log2 and the alignment mask from the (power-of-two) size. */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 700104: use libutil's kinfo_getvmmap() to enumerate
           the host mappings of this process. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* The mapping runs past the guest address space;
                           reserve up to the top only when the guest space
                           fits inside L1_MAP's reach. */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compatibility maps file.  Each line
           starts with "start-end" in hex. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
338 54936004 bellard
339 41c1b1c9 Paul Brook
/* Walk the multi-level virtual-address page table (l1_map) down to the
   PageDesc for page 'index'.  When 'alloc' is non-zero, missing
   intermediate tables and the final PageDesc leaf array are created
   (zero-initialized); otherwise the walk returns NULL at the first
   missing level. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
    /* mmap with MAP_ANONYMOUS also yields zeroed memory, matching
       qemu_mallocz below. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        /* Select the slot for this level from the next L2_BITS of index. */
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of L2_SIZE PageDesc entries. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
388 54936004 bellard
389 41c1b1c9 Paul Brook
/* Lookup-only variant of page_find_alloc(): returns NULL instead of
   creating missing page-table levels. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
393 fd6ce8f6 bellard
394 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
395 c227f099 Anthony Liguori
/* Walk the multi-level physical-address page table (l1_phys_map) down to
   the PhysPageDesc for page 'index'.  When 'alloc' is non-zero, missing
   intermediate tables are created and a newly created leaf array is
   initialized to IO_MEM_UNASSIGNED with identity region offsets;
   otherwise the walk returns NULL at the first missing level. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            /* Intermediate tables hold pointers only; zero == absent. */
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        /* Leaf array is explicitly initialized below, so plain
           qemu_malloc (not zeroed) suffices here. */
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
434 92e873b9 bellard
435 c227f099 Anthony Liguori
/* Lookup-only variant of phys_page_find_alloc(): returns NULL instead of
   creating missing page-table levels. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
439 92e873b9 bellard
440 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
441 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
442 3a7d929e bellard
                                    target_ulong vaddr);
443 c8a706fe pbrook
#define mmap_lock() do { } while(0)
444 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
445 9fa3e853 bellard
#endif
446 fd6ce8f6 bellard
447 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
448 4369415f bellard
449 4369415f bellard
#if defined(CONFIG_USER_ONLY)
450 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
451 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
452 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
453 4369415f bellard
#endif
454 4369415f bellard
455 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
456 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
457 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
458 4369415f bellard
#endif
459 4369415f bellard
460 8fcd3692 blueswir1
/* Allocate the translated-code buffer and the TranslationBlock array.
   'tb_size' is the requested buffer size in bytes; 0 selects a default.
   The buffer is made executable, and on several hosts it is placed at a
   specific address range so generated code can use direct calls and
   branches. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User-mode builds use a fixed static buffer (tb_size is ignored). */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* MAP_32BIT keeps the buffer reachable by 32-bit displacements. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        /* NOTE(review): 'start' is only a hint here — MAP_FIXED is not
           set on this branch, unlike sparc/arm above. */
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Generic host: plain allocation followed by an mprotect-style
       re-mapping to RWX via map_exec(). */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Flush threshold: leave room for one maximally-sized TB at the end. */
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
553 26a5f13b bellard
554 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    /* Order matters: the code generator must exist before its buffer is
       allocated, and code_gen_ptr starts at the beginning of that buffer. */
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
572 26a5f13b bellard
573 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
574 9656f324 pbrook
575 e59fb374 Juan Quintela
static int cpu_common_post_load(void *opaque, int version_id)
576 e7f4eff7 Juan Quintela
{
577 e7f4eff7 Juan Quintela
    CPUState *env = opaque;
578 9656f324 pbrook
579 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
580 3098dba0 aurel32
       version_id is increased. */
581 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
582 9656f324 pbrook
    tlb_flush(env, 1);
583 9656f324 pbrook
584 9656f324 pbrook
    return 0;
585 9656f324 pbrook
}
586 e7f4eff7 Juan Quintela
587 e7f4eff7 Juan Quintela
/* Migration description for the architecture-independent part of the CPU
   state: only 'halted' and 'interrupt_request' are serialized; the rest
   is handled by cpu_save/cpu_load and cpu_common_post_load above. */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
599 9656f324 pbrook
#endif
600 9656f324 pbrook
601 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
602 950f1472 Glauber Costa
{
603 950f1472 Glauber Costa
    CPUState *env = first_cpu;
604 950f1472 Glauber Costa
605 950f1472 Glauber Costa
    while (env) {
606 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
607 950f1472 Glauber Costa
            break;
608 950f1472 Glauber Costa
        env = env->next_cpu;
609 950f1472 Glauber Costa
    }
610 950f1472 Glauber Costa
611 950f1472 Glauber Costa
    return env;
612 950f1472 Glauber Costa
}
613 950f1472 Glauber Costa
614 6a00d601 bellard
/* Register a newly created CPU: assign it the next free cpu_index,
   append it to the global CPU list, and (system mode) hook its common
   and target-specific state into savevm/migration.  */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* In user mode several threads may create CPUs concurrently, so the
       list walk below must be serialized.  */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    /* Walk to the tail of the list; the number of CPUs already present
       becomes the new CPU's index.  */
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Common fields go through vmstate; the target-specific registers
       still use the legacy cpu_save/cpu_load callbacks.  */
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
643 fd6ce8f6 bellard
644 9fa3e853 bellard
/* Free a page's self-modifying-code bitmap (if one was built) and reset
   the write counter that decides when rebuilding it is worthwhile.  */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
652 9fa3e853 bellard
653 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
654 5cd2c5b6 Richard Henderson
655 5cd2c5b6 Richard Henderson
static void page_flush_tb_1 (int level, void **lp)
656 fd6ce8f6 bellard
{
657 5cd2c5b6 Richard Henderson
    int i;
658 fd6ce8f6 bellard
659 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
660 5cd2c5b6 Richard Henderson
        return;
661 5cd2c5b6 Richard Henderson
    }
662 5cd2c5b6 Richard Henderson
    if (level == 0) {
663 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
664 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
665 5cd2c5b6 Richard Henderson
            pd[i].first_tb = NULL;
666 5cd2c5b6 Richard Henderson
            invalidate_page_bitmap(pd + i);
667 fd6ce8f6 bellard
        }
668 5cd2c5b6 Richard Henderson
    } else {
669 5cd2c5b6 Richard Henderson
        void **pp = *lp;
670 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
671 5cd2c5b6 Richard Henderson
            page_flush_tb_1 (level - 1, pp + i);
672 5cd2c5b6 Richard Henderson
        }
673 5cd2c5b6 Richard Henderson
    }
674 5cd2c5b6 Richard Henderson
}
675 5cd2c5b6 Richard Henderson
676 5cd2c5b6 Richard Henderson
/* Clear the TB lists of every mapped page by recursing from each root
   entry of the l1_map.  Called from tb_flush().  */
static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
683 fd6ce8f6 bellard
684 fd6ce8f6 bellard
/* flush all the translation blocks */
685 d4e8164f bellard
/* XXX: tb_flush is currently not thread safe */
686 6a00d601 bellard
void tb_flush(CPUState *env1)
687 fd6ce8f6 bellard
{
688 6a00d601 bellard
    CPUState *env;
689 0124311e bellard
#if defined(DEBUG_FLUSH)
690 ab3d1727 blueswir1
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
691 ab3d1727 blueswir1
           (unsigned long)(code_gen_ptr - code_gen_buffer),
692 ab3d1727 blueswir1
           nb_tbs, nb_tbs > 0 ?
693 ab3d1727 blueswir1
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
694 fd6ce8f6 bellard
#endif
695 26a5f13b bellard
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
696 a208e54a pbrook
        cpu_abort(env1, "Internal error: code buffer overflow\n");
697 a208e54a pbrook
698 fd6ce8f6 bellard
    nb_tbs = 0;
699 3b46e624 ths
700 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
701 6a00d601 bellard
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
702 6a00d601 bellard
    }
703 9fa3e853 bellard
704 8a8a608f bellard
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
705 fd6ce8f6 bellard
    page_flush_tb();
706 9fa3e853 bellard
707 fd6ce8f6 bellard
    code_gen_ptr = code_gen_buffer;
708 d4e8164f bellard
    /* XXX: flush processor icache at this point if cache flush is
709 d4e8164f bellard
       expensive */
710 e3db7226 bellard
    tb_flush_count++;
711 fd6ce8f6 bellard
}
712 fd6ce8f6 bellard
713 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
714 fd6ce8f6 bellard
715 bc98a7ef j_mayer
/* Debug check: after invalidating a page, no TB in the physical hash
   table may still overlap the (page-aligned) address.  */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *t;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (t = tb_phys_hash[i]; t != NULL; t = t->phys_hash_next) {
            /* interval overlap: [address, address + PAGE) vs
               [t->pc, t->pc + t->size) */
            if (address + TARGET_PAGE_SIZE > t->pc &&
                address < t->pc + t->size) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)t->pc, t->size);
            }
        }
    }
}
731 fd6ce8f6 bellard
732 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
733 fd6ce8f6 bellard
static void tb_page_check(void)
734 fd6ce8f6 bellard
{
735 fd6ce8f6 bellard
    TranslationBlock *tb;
736 fd6ce8f6 bellard
    int i, flags1, flags2;
737 3b46e624 ths
738 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
739 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
740 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
741 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
742 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
743 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
744 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
745 fd6ce8f6 bellard
            }
746 fd6ce8f6 bellard
        }
747 fd6ce8f6 bellard
    }
748 fd6ce8f6 bellard
}
749 fd6ce8f6 bellard
750 fd6ce8f6 bellard
#endif
751 fd6ce8f6 bellard
752 fd6ce8f6 bellard
/* invalidate one TB */
753 fd6ce8f6 bellard
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
754 fd6ce8f6 bellard
                             int next_offset)
755 fd6ce8f6 bellard
{
756 fd6ce8f6 bellard
    TranslationBlock *tb1;
757 fd6ce8f6 bellard
    for(;;) {
758 fd6ce8f6 bellard
        tb1 = *ptb;
759 fd6ce8f6 bellard
        if (tb1 == tb) {
760 fd6ce8f6 bellard
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
761 fd6ce8f6 bellard
            break;
762 fd6ce8f6 bellard
        }
763 fd6ce8f6 bellard
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
764 fd6ce8f6 bellard
    }
765 fd6ce8f6 bellard
}
766 fd6ce8f6 bellard
767 9fa3e853 bellard
/* Unlink 'tb' from a per-page TB list.  List links are tagged pointers:
   the low two bits encode which of the TB's (up to two) pages the entry
   belongs to, i.e. which page_next[] slot continues the chain.  */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;                           /* extract page tag */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);   /* strip tag bits */
        if (tb1 == tb) {
            /* splice the entry out of the chain */
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
783 9fa3e853 bellard
784 d4e8164f bellard
/* Remove jump slot 'n' of 'tb' from the circular list of TBs chained to
   the same destination TB.  jmp_next[]/jmp_first entries are tagged
   pointers: the low two bits hold the jump slot index of the pointed-to
   TB, and tag value 2 marks the destination TB's own 'jmp_first' head.  */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;                          /* slot tag */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);  /* strip tag */
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: continue through the list head */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
811 d4e8164f bellard
812 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB: patch the jump to land on the TB's own exit stub at
   tc_ptr + tb_next_offset[n].  */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
818 d4e8164f bellard
819 41c1b1c9 Paul Brook
/* Remove 'tb' from every structure that references it: the physical-PC
   hash table, the per-page TB lists, each CPU's tb_jmp_cache, and the
   direct-jump chains.  'page_addr' is the page currently being torn down
   (its own list is skipped since the caller handles it); pass -1 to
   unlink from both pages unconditionally.  */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's virtual-PC jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the circular list of
       TBs that jump here and reset each jump to its own exit stub */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;   /* tag 2 marks the list head: done */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
874 9fa3e853 bellard
875 9fa3e853 bellard
/* Mark bits [start, start + len) as set in the bitmap 'tab'.
   Bit i of the bitmap is bit (i & 7) of byte tab[i >> 3].  */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* The whole run lies within a single byte.  */
        if (start < end) {
            *p |= head_mask & ~(0xff << (end & 7));
        }
    } else {
        int full_end;

        /* Partial leading byte.  */
        *p++ |= head_mask;
        start = (start + 8) & ~7;
        /* Whole bytes in the middle.  */
        full_end = end & ~7;
        while (start < full_end) {
            *p++ = 0xff;
            start += 8;
        }
        /* Partial trailing byte, if any.  */
        if (start < end) {
            *p |= ~(0xff << (end & 7));
        }
    }
}
901 9fa3e853 bellard
902 9fa3e853 bellard
/* Build the page's SMC bitmap (one bit per byte of the target page,
   set where translated code lives) by walking the page's TB list.  */
static void build_page_bitmap(PageDesc *p)
{
    int tag, bm_start, bm_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* the low two bits of the link say which of the TB's (up to
           two) pages this list entry belongs to */
        tag = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (tag == 0) {
            /* TB starts on this page; clamp at the page end (a longer
               range would be harmless anyway) */
            bm_start = tb->pc & ~TARGET_PAGE_MASK;
            bm_end = bm_start + tb->size;
            if (bm_end > TARGET_PAGE_SIZE) {
                bm_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* TB spilled over from the previous page: it covers this
               page from offset 0 up to where it ends */
            bm_start = 0;
            bm_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, bm_start, bm_end - bm_start);
        tb = tb->page_next[tag];
    }
}
929 9fa3e853 bellard
930 2e70f6ef pbrook
/* Translate guest code starting at 'pc' into a fresh TB and link it into
   the physical page tables.  If TB or code-buffer space is exhausted,
   everything is flushed and the allocation retried (which cannot fail).
   Returns the new TB.  */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the host code pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed: a TB may straddle two guest pages */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
967 3b46e624 ths
968 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* lazily resolved only if we hit a TB while handling a write fault */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* once a page has taken enough code-write faults, build the per-byte
       bitmap so later writes that miss translated code stay fast */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;   /* tag: which of the TB's pages this entry is */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1078 fd6ce8f6 bellard
1079 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1080 41c1b1c9 Paul Brook
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1081 9fa3e853 bellard
{
1082 9fa3e853 bellard
    PageDesc *p;
1083 9fa3e853 bellard
    int offset, b;
1084 59817ccb bellard
#if 0
1085 a4193c8a bellard
    if (1) {
1086 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1087 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1088 93fcfe39 aliguori
                  cpu_single_env->eip,
1089 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1090 59817ccb bellard
    }
1091 59817ccb bellard
#endif
1092 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1093 5fafdf24 ths
    if (!p)
1094 9fa3e853 bellard
        return;
1095 9fa3e853 bellard
    if (p->code_bitmap) {
1096 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1097 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1098 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1099 9fa3e853 bellard
            goto do_invalidate;
1100 9fa3e853 bellard
    } else {
1101 9fa3e853 bellard
    do_invalidate:
1102 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1103 9fa3e853 bellard
    }
1104 9fa3e853 bellard
}
1105 9fa3e853 bellard
1106 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1107 41c1b1c9 Paul Brook
/* Invalidate every TB on the page containing 'addr' (user-mode write
   protection fault path).  'pc'/'puc' describe the faulting context so
   precise self-modifying-code handling can restart execution; pc may be
   0 when no precise context is available.  */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* map the faulting host pc back to the TB being executed */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;   /* tag: which of the TB's pages this entry is */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
1165 9fa3e853 bellard
#endif
1166 fd6ce8f6 bellard
1167 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push onto the page's TB list; the link is tagged with 'n' so list
       walkers know which page_next[] slot continues the chain */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;   /* non-NULL: page already had code */
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* one host page may cover several target pages: collect the
           union of their flags and clear PAGE_WRITE on all of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1220 fd6ce8f6 bellard
1221 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1222 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1223 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1224 fd6ce8f6 bellard
{
1225 fd6ce8f6 bellard
    TranslationBlock *tb;
1226 fd6ce8f6 bellard
1227 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1228 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1229 d4e8164f bellard
        return NULL;
1230 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1231 fd6ce8f6 bellard
    tb->pc = pc;
1232 b448f2f3 bellard
    tb->cflags = 0;
1233 d4e8164f bellard
    return tb;
1234 d4e8164f bellard
}
1235 d4e8164f bellard
1236 2e70f6ef pbrook
/* Release a translation block.  Only the most recently allocated TB
   can actually be reclaimed (by rewinding the allocator state); any
   other TB is simply left in place until the next full flush. */
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        nb_tbs--;
        code_gen_ptr = tb->tc_ptr;
    }
}
1246 2e70f6ef pbrook
1247 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table (push at the head of the bucket) */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list; slot 1 is only used when the TB spans two
       guest pages */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag bit 2 marks this pointer as the head of the circular list of
       TBs jumping into this TB (see tb_reset_jump_recursive2) */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses; 0xffff means the jump slot is unused */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1286 fd6ce8f6 bellard
1287 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1288 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1289 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1290 fd6ce8f6 bellard
{
1291 9fa3e853 bellard
    int m_min, m_max, m;
1292 9fa3e853 bellard
    unsigned long v;
1293 9fa3e853 bellard
    TranslationBlock *tb;
1294 a513fe19 bellard
1295 a513fe19 bellard
    if (nb_tbs <= 0)
1296 a513fe19 bellard
        return NULL;
1297 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1298 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1299 a513fe19 bellard
        return NULL;
1300 a513fe19 bellard
    /* binary search (cf Knuth) */
1301 a513fe19 bellard
    m_min = 0;
1302 a513fe19 bellard
    m_max = nb_tbs - 1;
1303 a513fe19 bellard
    while (m_min <= m_max) {
1304 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1305 a513fe19 bellard
        tb = &tbs[m];
1306 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1307 a513fe19 bellard
        if (v == tc_ptr)
1308 a513fe19 bellard
            return tb;
1309 a513fe19 bellard
        else if (tc_ptr < v) {
1310 a513fe19 bellard
            m_max = m - 1;
1311 a513fe19 bellard
        } else {
1312 a513fe19 bellard
            m_min = m + 1;
1313 a513fe19 bellard
        }
1314 5fafdf24 ths
    }
1315 a513fe19 bellard
    return &tbs[m_max];
1316 a513fe19 bellard
}
1317 7501267e bellard
1318 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1319 ea041c0e bellard
1320 ea041c0e bellard
/* Unlink jump slot n of 'tb' from the TB it chains to, and recursively
   unchain that target.  Pointers in the jmp list carry a tag in their
   low 2 bits: 0/1 = which jmp_next slot of the pointed-to TB continues
   the list, 2 = list head (the target TB itself). */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (follow tagged pointers until tag == 2) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list of tb_next */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1358 ea041c0e bellard
1359 ea041c0e bellard
/* Unchain both outgoing jump slots of 'tb' (and, transitively, the
   TBs they lead to). */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    int n;

    for (n = 0; n < 2; n++) {
        tb_reset_jump_recursive2(tb, n);
    }
}
}
1364 ea041c0e bellard
1365 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1366 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1367 94df27fd Paul Brook
/* Invalidate any TB containing 'pc'.  In user-mode emulation guest
   virtual addresses map 1:1, so the range can be invalidated directly. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1371 94df27fd Paul Brook
#else
1372 d720b93d bellard
/* Invalidate any TB containing 'pc'.  System emulation: translate the
   virtual pc to a ram address via the page tables before invalidating. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        /* unmapped page: treat as unassigned I/O memory */
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* combine the page's ram offset with the sub-page offset of pc */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
1389 c27004ec bellard
#endif
1390 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1391 d720b93d bellard
1392 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
1393 c527ee8f Paul Brook
/* User-mode emulation does not support watchpoints, so there is
   nothing to remove -- intentional no-op. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}
1397 c527ee8f Paul Brook
1398 c527ee8f Paul Brook
/* User-mode emulation does not support watchpoints: always fail
   with -ENOSYS. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1403 c527ee8f Paul Brook
#else
1404 6658ffb8 pbrook
/* Add a watchpoint.  */
1405 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1406 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1407 6658ffb8 pbrook
{
1408 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1409 c0ce998e aliguori
    CPUWatchpoint *wp;
1410 6658ffb8 pbrook
1411 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1412 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1413 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1414 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1415 b4051334 aliguori
        return -EINVAL;
1416 b4051334 aliguori
    }
1417 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1418 a1d1bb31 aliguori
1419 a1d1bb31 aliguori
    wp->vaddr = addr;
1420 b4051334 aliguori
    wp->len_mask = len_mask;
1421 a1d1bb31 aliguori
    wp->flags = flags;
1422 a1d1bb31 aliguori
1423 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1424 c0ce998e aliguori
    if (flags & BP_GDB)
1425 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1426 c0ce998e aliguori
    else
1427 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1428 6658ffb8 pbrook
1429 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1430 a1d1bb31 aliguori
1431 a1d1bb31 aliguori
    if (watchpoint)
1432 a1d1bb31 aliguori
        *watchpoint = wp;
1433 a1d1bb31 aliguori
    return 0;
1434 6658ffb8 pbrook
}
1435 6658ffb8 pbrook
1436 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1437 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1438 a1d1bb31 aliguori
                          int flags)
1439 6658ffb8 pbrook
{
1440 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1441 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1442 6658ffb8 pbrook
1443 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1444 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1445 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1446 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1447 6658ffb8 pbrook
            return 0;
1448 6658ffb8 pbrook
        }
1449 6658ffb8 pbrook
    }
1450 a1d1bb31 aliguori
    return -ENOENT;
1451 6658ffb8 pbrook
}
1452 6658ffb8 pbrook
1453 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1454 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1455 a1d1bb31 aliguori
{
1456 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1457 7d03f82f edgar_igl
1458 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1459 a1d1bb31 aliguori
1460 a1d1bb31 aliguori
    qemu_free(watchpoint);
1461 a1d1bb31 aliguori
}
1462 a1d1bb31 aliguori
1463 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1464 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1465 a1d1bb31 aliguori
{
1466 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1467 a1d1bb31 aliguori
1468 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1469 a1d1bb31 aliguori
        if (wp->flags & mask)
1470 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1471 c0ce998e aliguori
    }
1472 7d03f82f edgar_igl
}
1473 c527ee8f Paul Brook
#endif
1474 7d03f82f edgar_igl
1475 a1d1bb31 aliguori
/* Add a breakpoint.  On success the new breakpoint is optionally
   returned through *breakpoint; returns 0, or -ENOSYS on targets
   without in-circuit-emulation support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* retranslate the TB containing pc so the breakpoint takes effect */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
1502 4c3a88a2 bellard
1503 a1d1bb31 aliguori
/* Remove a specific breakpoint.  Returns 0 on success, -ENOENT when no
   breakpoint matches (pc, flags), or -ENOSYS without ICE support. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
1520 7d03f82f edgar_igl
1521 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference and free it. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    /* retranslate the TB so the breakpoint stops firing */
    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1532 a1d1bb31 aliguori
1533 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    /* _SAFE variant: the current entry is freed during iteration */
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
1545 4c3a88a2 bellard
1546 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            /* under KVM the kernel handles single-stepping */
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1563 c33a346e bellard
1564 34865134 bellard
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* lazily open the log file the first time logging is enabled */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* subsequent opens append rather than truncate */
        log_append = 1;
    }
    /* logging turned off: close the file so it can be reopened later */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1591 34865134 bellard
1592 34865134 bellard
/* Change the log file name; any open log is closed and reopened with
   the current loglevel. */
void cpu_set_log_filename(const char *filename)
{
    /* NOTE(review): the previous logfilename string is not freed here
       (it may be a static default, which must not be freed) -- the old
       duplicate leaks if this is called repeatedly. */
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    /* reopen under the new name if logging is currently enabled */
    cpu_set_log(loglevel);
}
1601 c33a346e bellard
1602 3098dba0 aurel32
/* Force the cpu out of its chained-TB execution so it returns to the
   main loop soon. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1621 3098dba0 aurel32
1622 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* make the instruction counter expire immediately so the cpu
           leaves the translated-code loop */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* raising a *new* interrupt outside an I/O instruction breaks
           deterministic icount replay */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1653 ea041c0e bellard
1654 b54ad049 bellard
/* Clear the given bits from the cpu's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1658 b54ad049 bellard
1659 3098dba0 aurel32
/* Request the cpu to exit its execution loop as soon as possible. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1664 3098dba0 aurel32
1665 c7cd6a37 blueswir1
/* Table of log categories accepted by cpu_str_to_log_mask() (the -d
   option): mask bit, name, help text.  Terminated by a zero-mask entry. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1696 f193c797 bellard
1697 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1698 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1699 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1700 f6f3fbca Michael S. Tsirkin
1701 f6f3fbca Michael S. Tsirkin
/* Notify every registered physical-memory client of a mapping change. */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}
1710 f6f3fbca Michael S. Tsirkin
1711 f6f3fbca Michael S. Tsirkin
/* Ask every registered client to sync its dirty bitmap for [start, end).
   Stops at the first client error and returns its (negative) code;
   returns 0 when all clients succeed. */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}
1722 f6f3fbca Michael S. Tsirkin
1723 f6f3fbca Michael S. Tsirkin
/* Toggle migration logging on every registered client.  Stops at the
   first client error and returns its (negative) code; 0 on success. */
static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
1733 f6f3fbca Michael S. Tsirkin
1734 5cd2c5b6 Richard Henderson
/* Recursively walk one subtree of the physical page table, replaying
   every assigned page to the client via set_memory.  'level' counts
   remaining intermediate levels; level 0 holds PhysPageDesc leaves. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        /* empty subtree: nothing was ever mapped here */
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                /* NOTE(review): region_offset is passed as the start
                   address here; verify it equals the guest physical
                   address of this page in all cases. */
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1757 f6f3fbca Michael S. Tsirkin
1758 f6f3fbca Michael S. Tsirkin
/* Replay the whole physical page table to a newly registered client by
   walking every top-level (L1) entry.
   Bug fix: the original passed 'l1_phys_map + 1' for every iteration,
   so the loop replayed the second L1 slot P_L1_SIZE times and never
   visited slot 0 or slots >= 2; the index must be 'i'. */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}
1766 f6f3fbca Michael S. Tsirkin
1767 f6f3fbca Michael S. Tsirkin
/* Register a physical-memory client and immediately replay the current
   memory map to it so it starts with a complete view. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1772 f6f3fbca Michael S. Tsirkin
1773 f6f3fbca Michael S. Tsirkin
/* Unregister a physical-memory client; the caller retains ownership of
   the client structure. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1777 f6f3fbca Michael S. Tsirkin
#endif
1778 f6f3fbca Michael S. Tsirkin
1779 f193c797 bellard
/* Return nonzero iff the NUL-terminated string s2 equals the first n
   characters of s1 exactly (s2 must be exactly n characters long). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1785 3b46e624 ths
1786 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* p1 points just past the current item (next comma or NUL) */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            /* "all" selects every category */
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            /* unknown item name: signal error */
            return 0;
        }
    found:
        /* after "all", item is the zero-mask sentinel, so this is a no-op */
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
1818 ea041c0e bellard
1819 7501267e bellard
/* Print a fatal error message and the cpu state to stderr (and to the
   log file if enabled), then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* a second copy is needed because the list is consumed twice
       (stderr and the log file) */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT handler so abort() produces a
           core dump instead of re-entering a guest signal handler */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1858 7501267e bellard
1859 c5be9f08 ths
/* Duplicate a CPU state (used by fork emulation in user mode).
   Returns a freshly initialized CPUState that is a memcpy of 'env'
   with its own chaining/index preserved and its own copies of the
   break/watchpoint lists.

   BUG FIX: the previous code ran QTAILQ_INIT() on the SOURCE env's
   breakpoint/watchpoint lists.  That emptied the source lists (leaking
   their entries) and then iterated the now-empty lists, so no debug
   points were ever cloned.  The new CPU's list heads are the ones that
   must be re-initialized: after memcpy() they still point into the
   source CPU's entries.  */
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1892 c5be9f08 ths
1893 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1894 0124311e bellard
1895 5c751e99 edgar_igl
/* Drop tb_jmp_cache entries for every TB that could overlap the flushed
   page: both the page itself and the one before it (a TB may straddle
   a page boundary).  */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int h;

    h = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[h], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    h = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[h], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1909 5c751e99 edgar_igl
1910 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1911 08738984 Igor Kovalenko
    .addr_read  = -1,
1912 08738984 Igor Kovalenko
    .addr_write = -1,
1913 08738984 Igor Kovalenko
    .addr_code  = -1,
1914 08738984 Igor Kovalenko
    .addend     = -1,
1915 08738984 Igor Kovalenko
};
1916 08738984 Igor Kovalenko
1917 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1918 ee8b7021 bellard
   implemented yet) */
1919 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1920 33417e70 bellard
{
1921 33417e70 bellard
    int i;
1922 0124311e bellard
1923 9fa3e853 bellard
#if defined(DEBUG_TLB)
1924 9fa3e853 bellard
    printf("tlb_flush:\n");
1925 9fa3e853 bellard
#endif
1926 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1927 0124311e bellard
       links while we are modifying them */
1928 0124311e bellard
    env->current_tb = NULL;
1929 0124311e bellard
1930 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1931 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1932 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1933 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1934 cfde4bd9 Isaku Yamahata
        }
1935 33417e70 bellard
    }
1936 9fa3e853 bellard
1937 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1938 9fa3e853 bellard
1939 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1940 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1941 e3db7226 bellard
    tlb_flush_count++;
1942 33417e70 bellard
}
1943 33417e70 bellard
1944 274da6b2 bellard
/* Invalidate one TLB entry if any of its read/write/code addresses
   matches 'addr' (already page-aligned by the caller).  */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    const target_ulong mask = TARGET_PAGE_MASK | TLB_INVALID_MASK;

    if (addr == (tlb_entry->addr_read & mask) ||
        addr == (tlb_entry->addr_write & mask) ||
        addr == (tlb_entry->addr_code & mask)) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1955 61382a50 bellard
1956 2e12669a bellard
/* Flush the TLB entries covering one guest virtual page in every MMU
   mode, falling back to a full flush when the page lies inside a
   tracked large-page region.  */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tlb_flush_jmp_cache(env, addr);
}
1985 9fa3e853 bellard
1986 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1987 9fa3e853 bellard
   can be detected */
1988 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
1989 9fa3e853 bellard
{
1990 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1991 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1992 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1993 9fa3e853 bellard
}
1994 9fa3e853 bellard
1995 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1996 3a7d929e bellard
   tested for self modifying code */
1997 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1998 3a7d929e bellard
                                    target_ulong vaddr)
1999 9fa3e853 bellard
{
2000 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2001 1ccde1cb bellard
}
2002 1ccde1cb bellard
2003 5fafdf24 ths
/* If this RAM-backed entry's write address falls inside
   [start, start+length), mark it TLB_NOTDIRTY so the next write goes
   through the slow path and sets the dirty bits again.  */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write =
                (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
2014 1ccde1cb bellard
2015 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
2016 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2017 0a962c02 bellard
                                     int dirty_flags)
2018 1ccde1cb bellard
{
2019 1ccde1cb bellard
    CPUState *env;
2020 4f2ac237 bellard
    unsigned long length, start1;
2021 f7c11b53 Yoshiaki Tamura
    int i;
2022 1ccde1cb bellard
2023 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
2024 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
2025 1ccde1cb bellard
2026 1ccde1cb bellard
    length = end - start;
2027 1ccde1cb bellard
    if (length == 0)
2028 1ccde1cb bellard
        return;
2029 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2030 f23db169 bellard
2031 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
2032 1ccde1cb bellard
       when accessing the range */
2033 b2e0a138 Michael S. Tsirkin
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
2034 5579c7f3 pbrook
    /* Chek that we don't span multiple blocks - this breaks the
2035 5579c7f3 pbrook
       address comparisons below.  */
2036 b2e0a138 Michael S. Tsirkin
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2037 5579c7f3 pbrook
            != (end - 1) - start) {
2038 5579c7f3 pbrook
        abort();
2039 5579c7f3 pbrook
    }
2040 5579c7f3 pbrook
2041 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2042 cfde4bd9 Isaku Yamahata
        int mmu_idx;
2043 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2044 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
2045 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2046 cfde4bd9 Isaku Yamahata
                                      start1, length);
2047 cfde4bd9 Isaku Yamahata
        }
2048 6a00d601 bellard
    }
2049 1ccde1cb bellard
}
2050 1ccde1cb bellard
2051 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2052 74576198 aliguori
{
2053 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2054 74576198 aliguori
    in_migration = enable;
2055 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2056 f6f3fbca Michael S. Tsirkin
    return ret;
2057 74576198 aliguori
}
2058 74576198 aliguori
2059 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
2060 74576198 aliguori
{
2061 74576198 aliguori
    return in_migration;
2062 74576198 aliguori
}
2063 74576198 aliguori
2064 c227f099 Anthony Liguori
/* Ask memory clients to synchronize the dirty bitmap for the given
   physical range; returns the notifier's result.  */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    return cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
}
2072 2bec46dc aliguori
2073 3a7d929e bellard
/* Re-derive the TLB_NOTDIRTY bit of one RAM-backed entry from the
   current global dirty bitmap.  */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2087 3a7d929e bellard
2088 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2089 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2090 3a7d929e bellard
{
2091 3a7d929e bellard
    int i;
2092 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2093 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2094 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2095 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2096 cfde4bd9 Isaku Yamahata
    }
2097 3a7d929e bellard
}
2098 3a7d929e bellard
2099 0f459d16 pbrook
/* Clear TLB_NOTDIRTY on one entry if it matches 'vaddr' exactly
   (i.e. the entry was previously marked notdirty for that page).  */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
2104 1ccde1cb bellard
2105 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2106 0f459d16 pbrook
   so that it is no longer dirty */
2107 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2108 1ccde1cb bellard
{
2109 1ccde1cb bellard
    int i;
2110 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2111 1ccde1cb bellard
2112 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2113 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2114 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2115 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2116 9fa3e853 bellard
}
2117 9fa3e853 bellard
2118 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
2119 d4c430a8 Paul Brook
   large pages and trigger a full TLB flush if these are invalidated.  */
2120 d4c430a8 Paul Brook
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2121 d4c430a8 Paul Brook
                               target_ulong size)
2122 d4c430a8 Paul Brook
{
2123 d4c430a8 Paul Brook
    target_ulong mask = ~(size - 1);
2124 d4c430a8 Paul Brook
2125 d4c430a8 Paul Brook
    if (env->tlb_flush_addr == (target_ulong)-1) {
2126 d4c430a8 Paul Brook
        env->tlb_flush_addr = vaddr & mask;
2127 d4c430a8 Paul Brook
        env->tlb_flush_mask = mask;
2128 d4c430a8 Paul Brook
        return;
2129 d4c430a8 Paul Brook
    }
2130 d4c430a8 Paul Brook
    /* Extend the existing region to include the new page.
2131 d4c430a8 Paul Brook
       This is a compromise between unnecessary flushes and the cost
2132 d4c430a8 Paul Brook
       of maintaining a full variable size TLB.  */
2133 d4c430a8 Paul Brook
    mask &= env->tlb_flush_mask;
2134 d4c430a8 Paul Brook
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2135 d4c430a8 Paul Brook
        mask <<= 1;
2136 d4c430a8 Paul Brook
    }
2137 d4c430a8 Paul Brook
    env->tlb_flush_addr &= mask;
2138 d4c430a8 Paul Brook
    env->tlb_flush_mask = mask;
2139 d4c430a8 Paul Brook
}
2140 d4c430a8 Paul Brook
2141 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
2142 d4c430a8 Paul Brook
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2143 d4c430a8 Paul Brook
   supplied size is only used by tlb_flush_page.  */
2144 d4c430a8 Paul Brook
void tlb_set_page(CPUState *env, target_ulong vaddr,
2145 d4c430a8 Paul Brook
                  target_phys_addr_t paddr, int prot,
2146 d4c430a8 Paul Brook
                  int mmu_idx, target_ulong size)
2147 9fa3e853 bellard
{
2148 92e873b9 bellard
    PhysPageDesc *p;
2149 4f2ac237 bellard
    unsigned long pd;
2150 9fa3e853 bellard
    unsigned int index;
2151 4f2ac237 bellard
    target_ulong address;
2152 0f459d16 pbrook
    target_ulong code_address;
2153 355b1943 Paul Brook
    unsigned long addend;
2154 84b7b8e7 bellard
    CPUTLBEntry *te;
2155 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2156 c227f099 Anthony Liguori
    target_phys_addr_t iotlb;
2157 9fa3e853 bellard
2158 d4c430a8 Paul Brook
    assert(size >= TARGET_PAGE_SIZE);
2159 d4c430a8 Paul Brook
    if (size != TARGET_PAGE_SIZE) {
2160 d4c430a8 Paul Brook
        tlb_add_large_page(env, vaddr, size);
2161 d4c430a8 Paul Brook
    }
2162 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2163 9fa3e853 bellard
    if (!p) {
2164 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
2165 9fa3e853 bellard
    } else {
2166 9fa3e853 bellard
        pd = p->phys_offset;
2167 9fa3e853 bellard
    }
2168 9fa3e853 bellard
#if defined(DEBUG_TLB)
2169 7fd3f494 Stefan Weil
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2170 7fd3f494 Stefan Weil
           " prot=%x idx=%d pd=0x%08lx\n",
2171 7fd3f494 Stefan Weil
           vaddr, paddr, prot, mmu_idx, pd);
2172 9fa3e853 bellard
#endif
2173 9fa3e853 bellard
2174 0f459d16 pbrook
    address = vaddr;
2175 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2176 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2177 0f459d16 pbrook
        address |= TLB_MMIO;
2178 0f459d16 pbrook
    }
2179 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2180 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2181 0f459d16 pbrook
        /* Normal RAM.  */
2182 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2183 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2184 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
2185 0f459d16 pbrook
        else
2186 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
2187 0f459d16 pbrook
    } else {
2188 ccbb4d44 Stuart Brady
        /* IO handlers are currently passed a physical address.
2189 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2190 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2191 0f459d16 pbrook
           and avoid full address decoding in every device.
2192 0f459d16 pbrook
           We can't use the high bits of pd for this because
2193 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2194 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2195 8da3ff18 pbrook
        if (p) {
2196 8da3ff18 pbrook
            iotlb += p->region_offset;
2197 8da3ff18 pbrook
        } else {
2198 8da3ff18 pbrook
            iotlb += paddr;
2199 8da3ff18 pbrook
        }
2200 0f459d16 pbrook
    }
2201 0f459d16 pbrook
2202 0f459d16 pbrook
    code_address = address;
2203 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2204 0f459d16 pbrook
       watchpoint trap routines.  */
2205 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2206 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2207 bf298f83 Jun Koi
            /* Avoid trapping reads of pages with a write breakpoint. */
2208 bf298f83 Jun Koi
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2209 bf298f83 Jun Koi
                iotlb = io_mem_watch + paddr;
2210 bf298f83 Jun Koi
                address |= TLB_MMIO;
2211 bf298f83 Jun Koi
                break;
2212 bf298f83 Jun Koi
            }
2213 6658ffb8 pbrook
        }
2214 0f459d16 pbrook
    }
2215 d79acba4 balrog
2216 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2217 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2218 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2219 0f459d16 pbrook
    te->addend = addend - vaddr;
2220 0f459d16 pbrook
    if (prot & PAGE_READ) {
2221 0f459d16 pbrook
        te->addr_read = address;
2222 0f459d16 pbrook
    } else {
2223 0f459d16 pbrook
        te->addr_read = -1;
2224 0f459d16 pbrook
    }
2225 5c751e99 edgar_igl
2226 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2227 0f459d16 pbrook
        te->addr_code = code_address;
2228 0f459d16 pbrook
    } else {
2229 0f459d16 pbrook
        te->addr_code = -1;
2230 0f459d16 pbrook
    }
2231 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2232 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2233 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2234 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2235 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2236 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2237 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2238 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2239 9fa3e853 bellard
        } else {
2240 0f459d16 pbrook
            te->addr_write = address;
2241 9fa3e853 bellard
        }
2242 0f459d16 pbrook
    } else {
2243 0f459d16 pbrook
        te->addr_write = -1;
2244 9fa3e853 bellard
    }
2245 9fa3e853 bellard
}
2246 9fa3e853 bellard
2247 0124311e bellard
#else
2248 0124311e bellard
2249 ee8b7021 bellard
/* User-mode build: there is no softmmu TLB, so flushing is a no-op.  */
void tlb_flush(CPUState *env, int flush_global)
{
}
2252 0124311e bellard
2253 2e12669a bellard
/* User-mode build: no softmmu TLB, nothing to flush per page.  */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2256 0124311e bellard
2257 edf8e2af Mika Westerberg
/*
2258 edf8e2af Mika Westerberg
 * Walks guest process memory "regions" one by one
2259 edf8e2af Mika Westerberg
 * and calls callback function 'fn' for each region.
2260 edf8e2af Mika Westerberg
 */
2261 5cd2c5b6 Richard Henderson
2262 5cd2c5b6 Richard Henderson
struct walk_memory_regions_data
2263 5cd2c5b6 Richard Henderson
{
2264 5cd2c5b6 Richard Henderson
    walk_memory_regions_fn fn;
2265 5cd2c5b6 Richard Henderson
    void *priv;
2266 5cd2c5b6 Richard Henderson
    unsigned long start;
2267 5cd2c5b6 Richard Henderson
    int prot;
2268 5cd2c5b6 Richard Henderson
};
2269 5cd2c5b6 Richard Henderson
2270 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2271 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2272 5cd2c5b6 Richard Henderson
{
2273 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2274 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2275 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2276 5cd2c5b6 Richard Henderson
            return rc;
2277 5cd2c5b6 Richard Henderson
        }
2278 5cd2c5b6 Richard Henderson
    }
2279 5cd2c5b6 Richard Henderson
2280 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2281 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2282 5cd2c5b6 Richard Henderson
2283 5cd2c5b6 Richard Henderson
    return 0;
2284 5cd2c5b6 Richard Henderson
}
2285 5cd2c5b6 Richard Henderson
2286 5cd2c5b6 Richard Henderson
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2287 b480d9b7 Paul Brook
                                 abi_ulong base, int level, void **lp)
2288 5cd2c5b6 Richard Henderson
{
2289 b480d9b7 Paul Brook
    abi_ulong pa;
2290 5cd2c5b6 Richard Henderson
    int i, rc;
2291 5cd2c5b6 Richard Henderson
2292 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
2293 5cd2c5b6 Richard Henderson
        return walk_memory_regions_end(data, base, 0);
2294 5cd2c5b6 Richard Henderson
    }
2295 5cd2c5b6 Richard Henderson
2296 5cd2c5b6 Richard Henderson
    if (level == 0) {
2297 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
2298 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2299 5cd2c5b6 Richard Henderson
            int prot = pd[i].flags;
2300 5cd2c5b6 Richard Henderson
2301 5cd2c5b6 Richard Henderson
            pa = base | (i << TARGET_PAGE_BITS);
2302 5cd2c5b6 Richard Henderson
            if (prot != data->prot) {
2303 5cd2c5b6 Richard Henderson
                rc = walk_memory_regions_end(data, pa, prot);
2304 5cd2c5b6 Richard Henderson
                if (rc != 0) {
2305 5cd2c5b6 Richard Henderson
                    return rc;
2306 9fa3e853 bellard
                }
2307 9fa3e853 bellard
            }
2308 5cd2c5b6 Richard Henderson
        }
2309 5cd2c5b6 Richard Henderson
    } else {
2310 5cd2c5b6 Richard Henderson
        void **pp = *lp;
2311 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2312 b480d9b7 Paul Brook
            pa = base | ((abi_ulong)i <<
2313 b480d9b7 Paul Brook
                (TARGET_PAGE_BITS + L2_BITS * level));
2314 5cd2c5b6 Richard Henderson
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2315 5cd2c5b6 Richard Henderson
            if (rc != 0) {
2316 5cd2c5b6 Richard Henderson
                return rc;
2317 5cd2c5b6 Richard Henderson
            }
2318 5cd2c5b6 Richard Henderson
        }
2319 5cd2c5b6 Richard Henderson
    }
2320 5cd2c5b6 Richard Henderson
2321 5cd2c5b6 Richard Henderson
    return 0;
2322 5cd2c5b6 Richard Henderson
}
2323 5cd2c5b6 Richard Henderson
2324 5cd2c5b6 Richard Henderson
/* Walk guest process memory "regions" (maximal runs of pages with equal
   protection) and call 'fn(priv, start, end, prot)' for each.  A nonzero
   return from the callback aborts the walk and is propagated.  */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;       /* no region open yet */
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any.  */
    return walk_memory_regions_end(&data, 0, 0);
}
2344 edf8e2af Mika Westerberg
2345 b480d9b7 Paul Brook
/* walk_memory_regions() callback: print one region as
   "start-end size rwx" to the FILE* passed through 'priv'.  */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}
2359 edf8e2af Mika Westerberg
2360 edf8e2af Mika Westerberg
/* dump memory mappings */
2361 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2362 edf8e2af Mika Westerberg
{
2363 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2364 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2365 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2366 33417e70 bellard
}
2367 33417e70 bellard
2368 53a5960a pbrook
/* Return the PAGE_* flags of the page containing 'address',
   or 0 for an unmapped page.  */
int page_get_flags(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);

    if (!p) {
        return 0;
    }
    return p->flags;
}
2377 9fa3e853 bellard
2378 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
2379 376a7909 Richard Henderson
   The flag PAGE_WRITE_ORG is positioned automatically depending
2380 376a7909 Richard Henderson
   on PAGE_WRITE.  The mmap_lock should already be held.  */
2381 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2382 9fa3e853 bellard
{
2383 376a7909 Richard Henderson
    target_ulong addr, len;
2384 376a7909 Richard Henderson
2385 376a7909 Richard Henderson
    /* This function should never be called with addresses outside the
2386 376a7909 Richard Henderson
       guest address space.  If this assert fires, it probably indicates
2387 376a7909 Richard Henderson
       a missing call to h2g_valid.  */
2388 b480d9b7 Paul Brook
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2389 b480d9b7 Paul Brook
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2390 376a7909 Richard Henderson
#endif
2391 376a7909 Richard Henderson
    assert(start < end);
2392 9fa3e853 bellard
2393 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2394 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2395 376a7909 Richard Henderson
2396 376a7909 Richard Henderson
    if (flags & PAGE_WRITE) {
2397 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2398 376a7909 Richard Henderson
    }
2399 376a7909 Richard Henderson
2400 376a7909 Richard Henderson
    for (addr = start, len = end - start;
2401 376a7909 Richard Henderson
         len != 0;
2402 376a7909 Richard Henderson
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2403 376a7909 Richard Henderson
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2404 376a7909 Richard Henderson
2405 376a7909 Richard Henderson
        /* If the write protection bit is set, then we invalidate
2406 376a7909 Richard Henderson
           the code inside.  */
2407 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2408 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2409 9fa3e853 bellard
            p->first_tb) {
2410 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2411 9fa3e853 bellard
        }
2412 9fa3e853 bellard
        p->flags = flags;
2413 9fa3e853 bellard
    }
2414 33417e70 bellard
}
2415 33417e70 bellard
2416 3d97b40b ths
/* Check that every page in [start, start+len) is mapped, valid, and
   grants the requested PAGE_READ/PAGE_WRITE access, unprotecting
   write-protected translated-code pages as needed.  Returns 0 on
   success, -1 on any failure (including address wrap-around).

   BUG FIX: the old code executed 'return 0;' inside the PAGE_WRITE
   branch, so when write access was requested only the FIRST page of
   the range was ever checked/unprotected; the remaining pages were
   silently assumed writable.  The loop must continue to the next page
   instead.  */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            /* fall through to the next page: every page of the range
               must be checked, not just the first one */
        }
    }
    return 0;
}
2465 3d97b40b ths
2466 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2467 ccbb4d44 Stuart Brady
   page. Return TRUE if the fault was successfully handled. */
2468 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2469 9fa3e853 bellard
{
2470 45d679d6 Aurelien Jarno
    unsigned int prot;
2471 45d679d6 Aurelien Jarno
    PageDesc *p;
2472 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2473 9fa3e853 bellard
2474 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2475 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2476 c8a706fe pbrook
       practice it seems to be ok.  */
2477 c8a706fe pbrook
    mmap_lock();
2478 c8a706fe pbrook
2479 45d679d6 Aurelien Jarno
    p = page_find(address >> TARGET_PAGE_BITS);
2480 45d679d6 Aurelien Jarno
    if (!p) {
2481 c8a706fe pbrook
        mmap_unlock();
2482 9fa3e853 bellard
        return 0;
2483 c8a706fe pbrook
    }
2484 45d679d6 Aurelien Jarno
2485 9fa3e853 bellard
    /* if the page was really writable, then we change its
2486 9fa3e853 bellard
       protection back to writable */
2487 45d679d6 Aurelien Jarno
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2488 45d679d6 Aurelien Jarno
        host_start = address & qemu_host_page_mask;
2489 45d679d6 Aurelien Jarno
        host_end = host_start + qemu_host_page_size;
2490 45d679d6 Aurelien Jarno
2491 45d679d6 Aurelien Jarno
        prot = 0;
2492 45d679d6 Aurelien Jarno
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2493 45d679d6 Aurelien Jarno
            p = page_find(addr >> TARGET_PAGE_BITS);
2494 45d679d6 Aurelien Jarno
            p->flags |= PAGE_WRITE;
2495 45d679d6 Aurelien Jarno
            prot |= p->flags;
2496 45d679d6 Aurelien Jarno
2497 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2498 9fa3e853 bellard
               the corresponding translated code. */
2499 45d679d6 Aurelien Jarno
            tb_invalidate_phys_page(addr, pc, puc);
2500 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2501 45d679d6 Aurelien Jarno
            tb_invalidate_check(addr);
2502 9fa3e853 bellard
#endif
2503 9fa3e853 bellard
        }
2504 45d679d6 Aurelien Jarno
        mprotect((void *)g2h(host_start), qemu_host_page_size,
2505 45d679d6 Aurelien Jarno
                 prot & PAGE_BITS);
2506 45d679d6 Aurelien Jarno
2507 45d679d6 Aurelien Jarno
        mmap_unlock();
2508 45d679d6 Aurelien Jarno
        return 1;
2509 9fa3e853 bellard
    }
2510 c8a706fe pbrook
    mmap_unlock();
2511 9fa3e853 bellard
    return 0;
2512 9fa3e853 bellard
}
2513 9fa3e853 bellard
2514 6a00d601 bellard
/* No-op in the user-mode-only build: there is no softmmu TLB to update.  */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2518 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2519 9fa3e853 bellard
2520 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2521 8da3ff18 pbrook
2522 c04b2b78 Paul Brook
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2523 c04b2b78 Paul Brook
typedef struct subpage_t {
2524 c04b2b78 Paul Brook
    target_phys_addr_t base;
2525 f6405247 Richard Henderson
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2526 f6405247 Richard Henderson
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
2527 c04b2b78 Paul Brook
} subpage_t;
2528 c04b2b78 Paul Brook
2529 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2530 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset);
2531 f6405247 Richard Henderson
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2532 f6405247 Richard Henderson
                                ram_addr_t orig_memory,
2533 f6405247 Richard Henderson
                                ram_addr_t region_offset);
2534 db7b5426 blueswir1
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2535 db7b5426 blueswir1
                      need_subpage)                                     \
2536 db7b5426 blueswir1
    do {                                                                \
2537 db7b5426 blueswir1
        if (addr > start_addr)                                          \
2538 db7b5426 blueswir1
            start_addr2 = 0;                                            \
2539 db7b5426 blueswir1
        else {                                                          \
2540 db7b5426 blueswir1
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2541 db7b5426 blueswir1
            if (start_addr2 > 0)                                        \
2542 db7b5426 blueswir1
                need_subpage = 1;                                       \
2543 db7b5426 blueswir1
        }                                                               \
2544 db7b5426 blueswir1
                                                                        \
2545 49e9fba2 blueswir1
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2546 db7b5426 blueswir1
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2547 db7b5426 blueswir1
        else {                                                          \
2548 db7b5426 blueswir1
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2549 db7b5426 blueswir1
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2550 db7b5426 blueswir1
                need_subpage = 1;                                       \
2551 db7b5426 blueswir1
        }                                                               \
2552 db7b5426 blueswir1
    } while (0)
2553 db7b5426 blueswir1
2554 8f2498f9 Michael S. Tsirkin
/* register physical memory.
2555 8f2498f9 Michael S. Tsirkin
   For RAM, 'size' must be a multiple of the target page size.
2556 8f2498f9 Michael S. Tsirkin
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2557 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2558 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2559 ccbb4d44 Stuart Brady
   start_addr and region_offset are rounded down to a page boundary
2560 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2561 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
2562 c227f099 Anthony Liguori
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2563 c227f099 Anthony Liguori
                                         ram_addr_t size,
2564 c227f099 Anthony Liguori
                                         ram_addr_t phys_offset,
2565 c227f099 Anthony Liguori
                                         ram_addr_t region_offset)
2566 33417e70 bellard
{
2567 c227f099 Anthony Liguori
    target_phys_addr_t addr, end_addr;
2568 92e873b9 bellard
    PhysPageDesc *p;
2569 9d42037b bellard
    CPUState *env;
2570 c227f099 Anthony Liguori
    ram_addr_t orig_size = size;
2571 f6405247 Richard Henderson
    subpage_t *subpage;
2572 33417e70 bellard
2573 f6f3fbca Michael S. Tsirkin
    cpu_notify_set_memory(start_addr, size, phys_offset);
2574 f6f3fbca Michael S. Tsirkin
2575 67c4d23c pbrook
    if (phys_offset == IO_MEM_UNASSIGNED) {
2576 67c4d23c pbrook
        region_offset = start_addr;
2577 67c4d23c pbrook
    }
2578 8da3ff18 pbrook
    region_offset &= TARGET_PAGE_MASK;
2579 5fd386f6 bellard
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2580 c227f099 Anthony Liguori
    end_addr = start_addr + (target_phys_addr_t)size;
2581 49e9fba2 blueswir1
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2582 db7b5426 blueswir1
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2583 db7b5426 blueswir1
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2584 c227f099 Anthony Liguori
            ram_addr_t orig_memory = p->phys_offset;
2585 c227f099 Anthony Liguori
            target_phys_addr_t start_addr2, end_addr2;
2586 db7b5426 blueswir1
            int need_subpage = 0;
2587 db7b5426 blueswir1
2588 db7b5426 blueswir1
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2589 db7b5426 blueswir1
                          need_subpage);
2590 f6405247 Richard Henderson
            if (need_subpage) {
2591 db7b5426 blueswir1
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2592 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2593 8da3ff18 pbrook
                                           &p->phys_offset, orig_memory,
2594 8da3ff18 pbrook
                                           p->region_offset);
2595 db7b5426 blueswir1
                } else {
2596 db7b5426 blueswir1
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2597 db7b5426 blueswir1
                                            >> IO_MEM_SHIFT];
2598 db7b5426 blueswir1
                }
2599 8da3ff18 pbrook
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2600 8da3ff18 pbrook
                                 region_offset);
2601 8da3ff18 pbrook
                p->region_offset = 0;
2602 db7b5426 blueswir1
            } else {
2603 db7b5426 blueswir1
                p->phys_offset = phys_offset;
2604 db7b5426 blueswir1
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2605 db7b5426 blueswir1
                    (phys_offset & IO_MEM_ROMD))
2606 db7b5426 blueswir1
                    phys_offset += TARGET_PAGE_SIZE;
2607 db7b5426 blueswir1
            }
2608 db7b5426 blueswir1
        } else {
2609 db7b5426 blueswir1
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2610 db7b5426 blueswir1
            p->phys_offset = phys_offset;
2611 8da3ff18 pbrook
            p->region_offset = region_offset;
2612 db7b5426 blueswir1
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2613 8da3ff18 pbrook
                (phys_offset & IO_MEM_ROMD)) {
2614 db7b5426 blueswir1
                phys_offset += TARGET_PAGE_SIZE;
2615 0e8f0967 pbrook
            } else {
2616 c227f099 Anthony Liguori
                target_phys_addr_t start_addr2, end_addr2;
2617 db7b5426 blueswir1
                int need_subpage = 0;
2618 db7b5426 blueswir1
2619 db7b5426 blueswir1
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2620 db7b5426 blueswir1
                              end_addr2, need_subpage);
2621 db7b5426 blueswir1
2622 f6405247 Richard Henderson
                if (need_subpage) {
2623 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2624 8da3ff18 pbrook
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2625 67c4d23c pbrook
                                           addr & TARGET_PAGE_MASK);
2626 db7b5426 blueswir1
                    subpage_register(subpage, start_addr2, end_addr2,
2627 8da3ff18 pbrook
                                     phys_offset, region_offset);
2628 8da3ff18 pbrook
                    p->region_offset = 0;
2629 db7b5426 blueswir1
                }
2630 db7b5426 blueswir1
            }
2631 db7b5426 blueswir1
        }
2632 8da3ff18 pbrook
        region_offset += TARGET_PAGE_SIZE;
2633 33417e70 bellard
    }
2634 3b46e624 ths
2635 9d42037b bellard
    /* since each CPU stores ram addresses in its TLB cache, we must
2636 9d42037b bellard
       reset the modified entries */
2637 9d42037b bellard
    /* XXX: slow ! */
2638 9d42037b bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2639 9d42037b bellard
        tlb_flush(env, 1);
2640 9d42037b bellard
    }
2641 33417e70 bellard
}
2642 33417e70 bellard
2643 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2644 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2645 ba863458 bellard
{
2646 ba863458 bellard
    PhysPageDesc *p;
2647 ba863458 bellard
2648 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2649 ba863458 bellard
    if (!p)
2650 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2651 ba863458 bellard
    return p->phys_offset;
2652 ba863458 bellard
}
2653 ba863458 bellard
2654 c227f099 Anthony Liguori
/* Coalesced MMIO only has an effect when running under KVM.  */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_coalesce_mmio_region(addr, size);
    }
}
2659 f65ed4c1 aliguori
2660 c227f099 Anthony Liguori
/* Undo qemu_register_coalesced_mmio; a no-op without KVM.  */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_uncoalesce_mmio_region(addr, size);
    }
}
2665 f65ed4c1 aliguori
2666 62a2744c Sheng Yang
/* Drain any buffered coalesced-MMIO writes; a no-op without KVM.  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
2671 62a2744c Sheng Yang
2672 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2673 c902760f Marcelo Tosatti
2674 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2675 c902760f Marcelo Tosatti
2676 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
2677 c902760f Marcelo Tosatti
2678 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
2679 c902760f Marcelo Tosatti
{
2680 c902760f Marcelo Tosatti
    struct statfs fs;
2681 c902760f Marcelo Tosatti
    int ret;
2682 c902760f Marcelo Tosatti
2683 c902760f Marcelo Tosatti
    do {
2684 9742bf26 Yoshiaki Tamura
        ret = statfs(path, &fs);
2685 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);
2686 c902760f Marcelo Tosatti
2687 c902760f Marcelo Tosatti
    if (ret != 0) {
2688 9742bf26 Yoshiaki Tamura
        perror(path);
2689 9742bf26 Yoshiaki Tamura
        return 0;
2690 c902760f Marcelo Tosatti
    }
2691 c902760f Marcelo Tosatti
2692 c902760f Marcelo Tosatti
    if (fs.f_type != HUGETLBFS_MAGIC)
2693 9742bf26 Yoshiaki Tamura
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2694 c902760f Marcelo Tosatti
2695 c902760f Marcelo Tosatti
    return fs.f_bsize;
2696 c902760f Marcelo Tosatti
}
2697 c902760f Marcelo Tosatti
2698 04b16653 Alex Williamson
static void *file_ram_alloc(RAMBlock *block,
2699 04b16653 Alex Williamson
                            ram_addr_t memory,
2700 04b16653 Alex Williamson
                            const char *path)
2701 c902760f Marcelo Tosatti
{
2702 c902760f Marcelo Tosatti
    char *filename;
2703 c902760f Marcelo Tosatti
    void *area;
2704 c902760f Marcelo Tosatti
    int fd;
2705 c902760f Marcelo Tosatti
#ifdef MAP_POPULATE
2706 c902760f Marcelo Tosatti
    int flags;
2707 c902760f Marcelo Tosatti
#endif
2708 c902760f Marcelo Tosatti
    unsigned long hpagesize;
2709 c902760f Marcelo Tosatti
2710 c902760f Marcelo Tosatti
    hpagesize = gethugepagesize(path);
2711 c902760f Marcelo Tosatti
    if (!hpagesize) {
2712 9742bf26 Yoshiaki Tamura
        return NULL;
2713 c902760f Marcelo Tosatti
    }
2714 c902760f Marcelo Tosatti
2715 c902760f Marcelo Tosatti
    if (memory < hpagesize) {
2716 c902760f Marcelo Tosatti
        return NULL;
2717 c902760f Marcelo Tosatti
    }
2718 c902760f Marcelo Tosatti
2719 c902760f Marcelo Tosatti
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
2720 c902760f Marcelo Tosatti
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2721 c902760f Marcelo Tosatti
        return NULL;
2722 c902760f Marcelo Tosatti
    }
2723 c902760f Marcelo Tosatti
2724 c902760f Marcelo Tosatti
    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2725 9742bf26 Yoshiaki Tamura
        return NULL;
2726 c902760f Marcelo Tosatti
    }
2727 c902760f Marcelo Tosatti
2728 c902760f Marcelo Tosatti
    fd = mkstemp(filename);
2729 c902760f Marcelo Tosatti
    if (fd < 0) {
2730 9742bf26 Yoshiaki Tamura
        perror("unable to create backing store for hugepages");
2731 9742bf26 Yoshiaki Tamura
        free(filename);
2732 9742bf26 Yoshiaki Tamura
        return NULL;
2733 c902760f Marcelo Tosatti
    }
2734 c902760f Marcelo Tosatti
    unlink(filename);
2735 c902760f Marcelo Tosatti
    free(filename);
2736 c902760f Marcelo Tosatti
2737 c902760f Marcelo Tosatti
    memory = (memory+hpagesize-1) & ~(hpagesize-1);
2738 c902760f Marcelo Tosatti
2739 c902760f Marcelo Tosatti
    /*
2740 c902760f Marcelo Tosatti
     * ftruncate is not supported by hugetlbfs in older
2741 c902760f Marcelo Tosatti
     * hosts, so don't bother bailing out on errors.
2742 c902760f Marcelo Tosatti
     * If anything goes wrong with it under other filesystems,
2743 c902760f Marcelo Tosatti
     * mmap will fail.
2744 c902760f Marcelo Tosatti
     */
2745 c902760f Marcelo Tosatti
    if (ftruncate(fd, memory))
2746 9742bf26 Yoshiaki Tamura
        perror("ftruncate");
2747 c902760f Marcelo Tosatti
2748 c902760f Marcelo Tosatti
#ifdef MAP_POPULATE
2749 c902760f Marcelo Tosatti
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2750 c902760f Marcelo Tosatti
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2751 c902760f Marcelo Tosatti
     * to sidestep this quirk.
2752 c902760f Marcelo Tosatti
     */
2753 c902760f Marcelo Tosatti
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2754 c902760f Marcelo Tosatti
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2755 c902760f Marcelo Tosatti
#else
2756 c902760f Marcelo Tosatti
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2757 c902760f Marcelo Tosatti
#endif
2758 c902760f Marcelo Tosatti
    if (area == MAP_FAILED) {
2759 9742bf26 Yoshiaki Tamura
        perror("file_ram_alloc: can't mmap RAM pages");
2760 9742bf26 Yoshiaki Tamura
        close(fd);
2761 9742bf26 Yoshiaki Tamura
        return (NULL);
2762 c902760f Marcelo Tosatti
    }
2763 04b16653 Alex Williamson
    block->fd = fd;
2764 c902760f Marcelo Tosatti
    return area;
2765 c902760f Marcelo Tosatti
}
2766 c902760f Marcelo Tosatti
#endif
2767 c902760f Marcelo Tosatti
2768 d17b5288 Alex Williamson
/* Best-fit search for a free gap of at least 'size' bytes between the
   registered RAM blocks; the smallest adequate gap wins.  Returns 0 when
   the block list is empty.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        /* Find the start of the nearest block at or above 'end'.  */
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }

        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}
2793 04b16653 Alex Williamson
2794 04b16653 Alex Williamson
static ram_addr_t last_ram_offset(void)
2795 04b16653 Alex Williamson
{
2796 d17b5288 Alex Williamson
    RAMBlock *block;
2797 d17b5288 Alex Williamson
    ram_addr_t last = 0;
2798 d17b5288 Alex Williamson
2799 d17b5288 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next)
2800 d17b5288 Alex Williamson
        last = MAX(last, block->offset + block->length);
2801 d17b5288 Alex Williamson
2802 d17b5288 Alex Williamson
    return last;
2803 d17b5288 Alex Williamson
}
2804 d17b5288 Alex Williamson
2805 84b89d78 Cam Macdonell
/* Register a new RAM block of 'size' bytes and return its ram offset.
   If 'host' is non-NULL it supplies the backing memory; otherwise the
   backing store is allocated here (hugetlbfs when -mem-path is set,
   anonymous memory otherwise).  Aborts if the block id derived from
   'dev'/'name' is already registered.  */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Build the block id: "<qdev path>/<name>" when a device is given.  */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* hugetlbfs failed: fall back to anonymous memory.  */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the new range fully dirty.  */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}
2872 e9a1ab19 bellard
2873 6977dfe6 Yoshiaki Tamura
/* Allocate a RAM block backed by memory allocated internally (no
   caller-supplied host pointer).  */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
2877 6977dfe6 Yoshiaki Tamura
2878 c227f099 Anthony Liguori
/* Unregister the RAM block whose offset is 'addr' and release its host
   memory according to how it was allocated.  Silently does nothing when
   no block has that offset.  */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr != block->offset) {
            continue;
        }
        QLIST_REMOVE(block, next);
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            if (block->fd) {
                /* hugetlbfs-backed: unmap and close the backing fd.  */
                munmap(block->host, block->length);
                close(block->fd);
            } else {
                qemu_vfree(block->host);
            }
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            munmap(block->host, block->length);
#else
            qemu_vfree(block->host);
#endif
        }
        qemu_free(block);
        return;
    }

}
2907 e9a1ab19 bellard
2908 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2909 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2910 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
2911 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
2912 5579c7f3 pbrook

2913 5579c7f3 pbrook
   It should not be used for general purpose DMA.
2914 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2915 5579c7f3 pbrook
 */
2916 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
2917 dc828ca1 pbrook
{
2918 94a6b54f pbrook
    RAMBlock *block;
2919 94a6b54f pbrook
2920 f471a17e Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2921 f471a17e Alex Williamson
        if (addr - block->offset < block->length) {
2922 f471a17e Alex Williamson
            QLIST_REMOVE(block, next);
2923 f471a17e Alex Williamson
            QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2924 f471a17e Alex Williamson
            return block->host + (addr - block->offset);
2925 f471a17e Alex Williamson
        }
2926 94a6b54f pbrook
    }
2927 f471a17e Alex Williamson
2928 f471a17e Alex Williamson
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2929 f471a17e Alex Williamson
    abort();
2930 f471a17e Alex Williamson
2931 f471a17e Alex Williamson
    return NULL;
2932 dc828ca1 pbrook
}
2933 dc828ca1 pbrook
2934 b2e0a138 Michael S. Tsirkin
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2935 b2e0a138 Michael S. Tsirkin
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2936 b2e0a138 Michael S. Tsirkin
 */
2937 b2e0a138 Michael S. Tsirkin
void *qemu_safe_ram_ptr(ram_addr_t addr)
2938 b2e0a138 Michael S. Tsirkin
{
2939 b2e0a138 Michael S. Tsirkin
    RAMBlock *block;
2940 b2e0a138 Michael S. Tsirkin
2941 b2e0a138 Michael S. Tsirkin
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2942 b2e0a138 Michael S. Tsirkin
        if (addr - block->offset < block->length) {
2943 b2e0a138 Michael S. Tsirkin
            return block->host + (addr - block->offset);
2944 b2e0a138 Michael S. Tsirkin
        }
2945 b2e0a138 Michael S. Tsirkin
    }
2946 b2e0a138 Michael S. Tsirkin
2947 b2e0a138 Michael S. Tsirkin
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2948 b2e0a138 Michael S. Tsirkin
    abort();
2949 b2e0a138 Michael S. Tsirkin
2950 b2e0a138 Michael S. Tsirkin
    return NULL;
2951 b2e0a138 Michael S. Tsirkin
}
2952 b2e0a138 Michael S. Tsirkin
2953 e890261f Marcelo Tosatti
/* Translate a host pointer back to a guest ram offset.  On success
   stores the offset in *ram_addr and returns 0; returns -1 when the
   pointer is not inside any registered RAM block.  */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* NOTE(review): relies on the unsigned comparison against
           block->length rejecting pointers below block->host — confirm
           ram_addr_t is wide enough on all hosts.  */
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }
    return -1;
}
2966 f471a17e Alex Williamson
2967 e890261f Marcelo Tosatti
/* Some of the softmmu routines need to translate from a host pointer
2968 e890261f Marcelo Tosatti
   (typically a TLB entry) back to a ram offset.  */
2969 e890261f Marcelo Tosatti
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2970 e890261f Marcelo Tosatti
{
2971 e890261f Marcelo Tosatti
    ram_addr_t ram_addr;
2972 f471a17e Alex Williamson
2973 e890261f Marcelo Tosatti
    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2974 e890261f Marcelo Tosatti
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2975 e890261f Marcelo Tosatti
        abort();
2976 e890261f Marcelo Tosatti
    }
2977 e890261f Marcelo Tosatti
    return ram_addr;
2978 5579c7f3 pbrook
}
2979 5579c7f3 pbrook
2980 c227f099 Anthony Liguori
/* Accessors for unassigned (unmapped) physical memory: reads return 0
 * and writes are discarded.  On targets that fault on such accesses
 * (sparc, microblaze), do_unassigned_access() is called with
 * (addr, is_write, is_exec, is_asi, size) so the CPU can raise the
 * appropriate exception.  */

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

/* Dispatch tables indexed by access-size shift: 0=byte, 1=word, 2=long. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3054 33417e70 bellard
3055 c227f099 Anthony Liguori
/* Write handlers installed for RAM pages whose CODE_DIRTY_FLAG is clear:
 * the page may hold translated code, so before performing the store any
 * translated block covering the written address must be invalidated.
 * Once the page is fully dirty again, the TLB entry is flipped back to a
 * plain RAM mapping so later writes take the fast path.  */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Invalidate TBs overlapping this 1-byte write; this may set
         * CODE_DIRTY_FLAG, so re-read the flags afterwards.  */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    /* Mark everything dirty except CODE_DIRTY_FLAG, which only
     * tb_invalidate_* may set.  */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* Same as notdirty_mem_writeb() for a 2-byte store.  */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* Same as notdirty_mem_writeb() for a 4-byte store.  */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* Read side of IO_MEM_ROM / IO_MEM_NOTDIRTY: reads never reach the
 * handler (they go straight to RAM), so these slots are never called.  */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3126 1ccde1cb bellard
3127 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.
 *
 * @offset:   offset of the access within the current page
 *            (combined with env->mem_io_vaddr to get the full vaddr)
 * @len_mask: mask selecting the bytes covered by the access
 * @flags:    BP_MEM_READ / BP_MEM_WRITE, matched against wp->flags
 */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Hit if either side's length mask makes the addresses agree,
         * and the access direction matches the watchpoint type.  */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                /* Find and invalidate the TB that contains the faulting
                 * access so it can be regenerated around this point.  */
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    /* Stop before the access: report EXCP_DEBUG now.  */
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Stop after the access: regenerate a single-insn TB
                     * and re-execute; the re-entry path above raises the
                     * debug interrupt afterwards.  */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                /* Does not return: longjmps back into the cpu loop.  */
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3171 0f459d16 pbrook
3172 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  The len_mask arguments (~0x0, ~0x1, ~0x3) correspond
   to 1-, 2- and 4-byte accesses respectively.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

/* Dispatch tables indexed by access-size shift: 0=byte, 1=word, 2=long. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
3225 6658ffb8 pbrook
3226 f6405247 Richard Henderson
/* Route an access within a subpage to the io handler registered for the
 * containing sub-region.  @len is the access-size shift (0/1/2).  */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    /* Rebase into the sub-region, then look up its io_mem slot.  */
    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

/* Write counterpart of subpage_readlen().  */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3254 db7b5426 blueswir1
3255 c227f099 Anthony Liguori
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3256 db7b5426 blueswir1
{
3257 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 0);
3258 db7b5426 blueswir1
}
3259 db7b5426 blueswir1
3260 c227f099 Anthony Liguori
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3261 db7b5426 blueswir1
                            uint32_t value)
3262 db7b5426 blueswir1
{
3263 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
3264 db7b5426 blueswir1
}
3265 db7b5426 blueswir1
3266 c227f099 Anthony Liguori
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3267 db7b5426 blueswir1
{
3268 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 1);
3269 db7b5426 blueswir1
}
3270 db7b5426 blueswir1
3271 c227f099 Anthony Liguori
static void subpage_writew (void *opaque, target_phys_addr_t addr,
3272 db7b5426 blueswir1
                            uint32_t value)
3273 db7b5426 blueswir1
{
3274 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
3275 db7b5426 blueswir1
}
3276 db7b5426 blueswir1
3277 c227f099 Anthony Liguori
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3278 db7b5426 blueswir1
{
3279 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 2);
3280 db7b5426 blueswir1
}
3281 db7b5426 blueswir1
3282 f6405247 Richard Henderson
static void subpage_writel (void *opaque, target_phys_addr_t addr,
3283 f6405247 Richard Henderson
                            uint32_t value)
3284 db7b5426 blueswir1
{
3285 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
3286 db7b5426 blueswir1
}
3287 db7b5426 blueswir1
3288 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const subpage_read[] = {
3289 db7b5426 blueswir1
    &subpage_readb,
3290 db7b5426 blueswir1
    &subpage_readw,
3291 db7b5426 blueswir1
    &subpage_readl,
3292 db7b5426 blueswir1
};
3293 db7b5426 blueswir1
3294 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const subpage_write[] = {
3295 db7b5426 blueswir1
    &subpage_writeb,
3296 db7b5426 blueswir1
    &subpage_writew,
3297 db7b5426 blueswir1
    &subpage_writel,
3298 db7b5426 blueswir1
};
3299 db7b5426 blueswir1
3300 c227f099 Anthony Liguori
/* Register the io handler @memory for the byte range [@start, @end]
 * (page-relative, inclusive) of subpage @mmio, applying @region_offset
 * to accesses.  Returns 0 on success, -1 if the range is out of bounds. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* RAM cannot be accessed through a subpage; treat it as unassigned. */
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    /* Reduce the io zone token to its io_mem table index.  */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3323 db7b5426 blueswir1
3324 f6405247 Richard Henderson
/* Allocate a subpage container for the page at @base, initially mapping
 * the whole page to @orig_memory/@region_offset.  *@phys receives the io
 * zone token (tagged with IO_MEM_SUBPAGE) to install in the phys map.  */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    /* Cover the full page with the original mapping; callers then
     * override sub-ranges via subpage_register().  */
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3345 db7b5426 blueswir1
3346 88715657 aliguori
static int get_free_io_mem_idx(void)
3347 88715657 aliguori
{
3348 88715657 aliguori
    int i;
3349 88715657 aliguori
3350 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3351 88715657 aliguori
        if (!io_mem_used[i]) {
3352 88715657 aliguori
            io_mem_used[i] = 1;
3353 88715657 aliguori
            return i;
3354 88715657 aliguori
        }
3355 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3356 88715657 aliguori
    return -1;
3357 88715657 aliguori
}
3358 88715657 aliguori
3359 dd310534 Alexander Graf
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */
3371 dd310534 Alexander Graf
3372 dd310534 Alexander Graf
typedef struct SwapEndianContainer {
3373 dd310534 Alexander Graf
    CPUReadMemoryFunc *read[3];
3374 dd310534 Alexander Graf
    CPUWriteMemoryFunc *write[3];
3375 dd310534 Alexander Graf
    void *opaque;
3376 dd310534 Alexander Graf
} SwapEndianContainer;
3377 dd310534 Alexander Graf
3378 dd310534 Alexander Graf
static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3379 dd310534 Alexander Graf
{
3380 dd310534 Alexander Graf
    uint32_t val;
3381 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3382 dd310534 Alexander Graf
    val = c->read[0](c->opaque, addr);
3383 dd310534 Alexander Graf
    return val;
3384 dd310534 Alexander Graf
}
3385 dd310534 Alexander Graf
3386 dd310534 Alexander Graf
static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3387 dd310534 Alexander Graf
{
3388 dd310534 Alexander Graf
    uint32_t val;
3389 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3390 dd310534 Alexander Graf
    val = bswap16(c->read[1](c->opaque, addr));
3391 dd310534 Alexander Graf
    return val;
3392 dd310534 Alexander Graf
}
3393 dd310534 Alexander Graf
3394 dd310534 Alexander Graf
static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3395 dd310534 Alexander Graf
{
3396 dd310534 Alexander Graf
    uint32_t val;
3397 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3398 dd310534 Alexander Graf
    val = bswap32(c->read[2](c->opaque, addr));
3399 dd310534 Alexander Graf
    return val;
3400 dd310534 Alexander Graf
}
3401 dd310534 Alexander Graf
3402 dd310534 Alexander Graf
static CPUReadMemoryFunc * const swapendian_readfn[3]={
3403 dd310534 Alexander Graf
    swapendian_mem_readb,
3404 dd310534 Alexander Graf
    swapendian_mem_readw,
3405 dd310534 Alexander Graf
    swapendian_mem_readl
3406 dd310534 Alexander Graf
};
3407 dd310534 Alexander Graf
3408 dd310534 Alexander Graf
static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3409 dd310534 Alexander Graf
                                  uint32_t val)
3410 dd310534 Alexander Graf
{
3411 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3412 dd310534 Alexander Graf
    c->write[0](c->opaque, addr, val);
3413 dd310534 Alexander Graf
}
3414 dd310534 Alexander Graf
3415 dd310534 Alexander Graf
static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3416 dd310534 Alexander Graf
                                  uint32_t val)
3417 dd310534 Alexander Graf
{
3418 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3419 dd310534 Alexander Graf
    c->write[1](c->opaque, addr, bswap16(val));
3420 dd310534 Alexander Graf
}
3421 dd310534 Alexander Graf
3422 dd310534 Alexander Graf
static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3423 dd310534 Alexander Graf
                                  uint32_t val)
3424 dd310534 Alexander Graf
{
3425 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3426 dd310534 Alexander Graf
    c->write[2](c->opaque, addr, bswap32(val));
3427 dd310534 Alexander Graf
}
3428 dd310534 Alexander Graf
3429 dd310534 Alexander Graf
static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3430 dd310534 Alexander Graf
    swapendian_mem_writeb,
3431 dd310534 Alexander Graf
    swapendian_mem_writew,
3432 dd310534 Alexander Graf
    swapendian_mem_writel
3433 dd310534 Alexander Graf
};
3434 dd310534 Alexander Graf
3435 dd310534 Alexander Graf
/* Interpose the swapendian_* handlers in front of io_mem slot @io_index,
 * saving the original handlers and opaque in a SwapEndianContainer.
 * Undone by swapendian_del().  */
static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}
3451 dd310534 Alexander Graf
3452 dd310534 Alexander Graf
static void swapendian_del(int io_index)
3453 dd310534 Alexander Graf
{
3454 dd310534 Alexander Graf
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3455 dd310534 Alexander Graf
        qemu_free(io_mem_opaque[io_index]);
3456 dd310534 Alexander Graf
    }
3457 dd310534 Alexander Graf
}
3458 dd310534 Alexander Graf
3459 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        /* Allocate a fresh slot.  */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller passed an io zone token; recover the table index.  */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    /* Missing handlers fall back to the unassigned-memory ones.  */
    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    /* Interpose byte-swapping handlers when the device endianness does
     * not match the target's.  */
    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}
3511 61382a50 bellard
3512 d60efc6b Blue Swirl
/* Allocate a new io zone for the given handlers; see
 * cpu_register_io_memory_fixed() for the contract.  */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
3518 1eed09cb Avi Kivity
3519 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3520 88715657 aliguori
{
3521 88715657 aliguori
    int i;
3522 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3523 88715657 aliguori
3524 dd310534 Alexander Graf
    swapendian_del(io_index);
3525 dd310534 Alexander Graf
3526 88715657 aliguori
    for (i=0;i < 3; i++) {
3527 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3528 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3529 88715657 aliguori
    }
3530 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3531 88715657 aliguori
    io_mem_used[io_index] = 0;
3532 88715657 aliguori
}
3533 88715657 aliguori
3534 e9179ce1 Avi Kivity
/* One-time setup of the io_mem dispatch tables: install the fixed
 * handlers for ROM, unassigned and not-dirty RAM, reserve the low slots,
 * and register the watchpoint pass-through zone.  */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    /* Mark slots 0..4 as used so get_free_io_mem_idx() never hands them
     * out; presumably these are the fixed IO_MEM_* zones (RAM, ROM,
     * unassigned, notdirty, subpage) -- TODO confirm against the
     * IO_MEM_* definitions in the headers.  */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
3554 e9179ce1 Avi Kivity
3555 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3556 e2eef170 pbrook
3557 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3558 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3559 a68fe89c Paul Brook
/* Debugger access to guest virtual memory (user-mode emulation).
 * Copies len bytes between buf and guest address addr, honouring the
 * page protection flags.  Returns 0 on success, -1 on any unmapped
 * or protection-violating page.
 */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    while (len > 0) {
        target_ulong page = addr & TARGET_PAGE_MASK;
        int chunk = (page + TARGET_PAGE_SIZE) - addr;
        int flags;
        void *host;

        if (chunk > len) {
            chunk = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            host = lock_user(VERIFY_WRITE, addr, chunk, 0);
            if (!host) {
                return -1;
            }
            memcpy(host, buf, chunk);
            unlock_user(host, addr, chunk);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            host = lock_user(VERIFY_READ, addr, chunk, 1);
            if (!host) {
                return -1;
            }
            memcpy(buf, host, chunk);
            unlock_user(host, addr, 0);
        }
        len -= chunk;
        buf += chunk;
        addr += chunk;
    }
    return 0;
}
3597 8df1cd07 bellard
3598 13eb76e0 bellard
#else
3599 c227f099 Anthony Liguori
/* Slow-path guest physical memory access (mainly for debug and for
 * regions without a faster mapping).  The transfer is split at page
 * boundaries; each piece is dispatched either to the registered I/O
 * handler (MMIO) or copied to/from host RAM, with code invalidation
 * and dirty tracking on RAM writes.
 */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* limit the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                /* non-RAM write: dispatch to the I/O handler using the
                   widest naturally aligned access size available */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            /* reads from anything above ROM that is not a ROM device
               (ROMD) go through the I/O handlers */
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3695 8df1cd07 bellard
3696 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
3697 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3698 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3699 d0ecd2aa bellard
{
3700 d0ecd2aa bellard
    int l;
3701 d0ecd2aa bellard
    uint8_t *ptr;
3702 c227f099 Anthony Liguori
    target_phys_addr_t page;
3703 d0ecd2aa bellard
    unsigned long pd;
3704 d0ecd2aa bellard
    PhysPageDesc *p;
3705 3b46e624 ths
3706 d0ecd2aa bellard
    while (len > 0) {
3707 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3708 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3709 d0ecd2aa bellard
        if (l > len)
3710 d0ecd2aa bellard
            l = len;
3711 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3712 d0ecd2aa bellard
        if (!p) {
3713 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3714 d0ecd2aa bellard
        } else {
3715 d0ecd2aa bellard
            pd = p->phys_offset;
3716 d0ecd2aa bellard
        }
3717 3b46e624 ths
3718 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3719 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3720 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3721 d0ecd2aa bellard
            /* do nothing */
3722 d0ecd2aa bellard
        } else {
3723 d0ecd2aa bellard
            unsigned long addr1;
3724 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3725 d0ecd2aa bellard
            /* ROM/RAM case */
3726 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3727 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3728 d0ecd2aa bellard
        }
3729 d0ecd2aa bellard
        len -= l;
3730 d0ecd2aa bellard
        buf += l;
3731 d0ecd2aa bellard
        addr += l;
3732 d0ecd2aa bellard
    }
3733 d0ecd2aa bellard
}
3734 d0ecd2aa bellard
3735 6d16c2f8 aliguori
/* Staging buffer used by cpu_physical_memory_map() when the requested
   region is MMIO rather than directly addressable RAM. */
typedef struct {
    void *buffer;                /* host memory holding the staged data */
    target_phys_addr_t addr;     /* guest physical address being mapped */
    target_phys_addr_t len;      /* length of the staged region */
} BounceBuffer;

/* only one bounce buffer may be in use at any time */
static BounceBuffer bounce;
3742 6d16c2f8 aliguori
3743 ba223c29 aliguori
/* A client waiting to be told that retrying cpu_physical_memory_map()
   may now succeed (i.e. the bounce buffer has been released). */
typedef struct MapClient {
    void *opaque;                    /* argument handed back to callback */
    void (*callback)(void *opaque);  /* invoked by cpu_notify_map_clients() */
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3751 ba223c29 aliguori
3752 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3753 ba223c29 aliguori
{
3754 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3755 ba223c29 aliguori
3756 ba223c29 aliguori
    client->opaque = opaque;
3757 ba223c29 aliguori
    client->callback = callback;
3758 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3759 ba223c29 aliguori
    return client;
3760 ba223c29 aliguori
}
3761 ba223c29 aliguori
3762 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3763 ba223c29 aliguori
{
3764 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3765 ba223c29 aliguori
3766 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3767 34d5e948 Isaku Yamahata
    qemu_free(client);
3768 ba223c29 aliguori
}
3769 ba223c29 aliguori
3770 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3771 ba223c29 aliguori
{
3772 ba223c29 aliguori
    MapClient *client;
3773 ba223c29 aliguori
3774 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3775 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3776 ba223c29 aliguori
        client->callback(client->opaque);
3777 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3778 ba223c29 aliguori
    }
3779 ba223c29 aliguori
}
3780 ba223c29 aliguori
3781 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* non-RAM page: fall back to the single global bounce
               buffer.  It cannot be combined with directly mapped
               pages, and only one bounce mapping may exist at a time. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* pre-fill the buffer so the caller can read from it */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* stop as soon as the region stops being contiguous in
               host address space */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3842 6d16c2f8 aliguori
3843 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* directly mapped RAM: only dirty tracking / TB invalidation
           is needed, and only for writes */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* bounce-buffer case: flush written data back to the guest,
       release the buffer and wake up waiting map clients */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
3878 d0ecd2aa bellard
3879 8df1cd07 bellard
/* Load a 32-bit value from guest physical memory.
   warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
3910 8df1cd07 bellard
3911 84b7b8e7 bellard
/* Load a 64-bit value from guest physical memory.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: a 64-bit MMIO read is split into two 32-bit
           accesses, combined according to the target's byte order */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
3948 84b7b8e7 bellard
3949 aab33094 bellard
/* XXX: optimize */
3950 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
3951 aab33094 bellard
{
3952 aab33094 bellard
    uint8_t val;
3953 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3954 aab33094 bellard
    return val;
3955 aab33094 bellard
}
3956 aab33094 bellard
3957 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
3958 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
3959 aab33094 bellard
{
3960 733f0b02 Michael S. Tsirkin
    int io_index;
3961 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
3962 733f0b02 Michael S. Tsirkin
    uint64_t val;
3963 733f0b02 Michael S. Tsirkin
    unsigned long pd;
3964 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
3965 733f0b02 Michael S. Tsirkin
3966 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3967 733f0b02 Michael S. Tsirkin
    if (!p) {
3968 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
3969 733f0b02 Michael S. Tsirkin
    } else {
3970 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
3971 733f0b02 Michael S. Tsirkin
    }
3972 733f0b02 Michael S. Tsirkin
3973 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3974 733f0b02 Michael S. Tsirkin
        !(pd & IO_MEM_ROMD)) {
3975 733f0b02 Michael S. Tsirkin
        /* I/O case */
3976 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3977 733f0b02 Michael S. Tsirkin
        if (p)
3978 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3979 733f0b02 Michael S. Tsirkin
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3980 733f0b02 Michael S. Tsirkin
    } else {
3981 733f0b02 Michael S. Tsirkin
        /* RAM case */
3982 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3983 733f0b02 Michael S. Tsirkin
            (addr & ~TARGET_PAGE_MASK);
3984 733f0b02 Michael S. Tsirkin
        val = lduw_p(ptr);
3985 733f0b02 Michael S. Tsirkin
    }
3986 733f0b02 Michael S. Tsirkin
    return val;
3987 aab33094 bellard
}
3988 aab33094 bellard
3989 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* non-RAM: dispatch to the registered 32-bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* during migration the dirty log must still be updated so the
           modified page is re-transferred */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4027 8df1cd07 bellard
4028 c227f099 Anthony Liguori
/* 64-bit store that does not mark the RAM page dirty and does not
   invalidate translated code (see stl_phys_notdirty).
   warning: addr must be aligned */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* 64-bit MMIO write split into two 32-bit accesses in target
           byte order */
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
4059 bc98a7ef j_mayer
4060 8df1cd07 bellard
/* Store a 32-bit value to guest physical memory, with code
   invalidation and dirty tracking on the RAM path.
   warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4095 8df1cd07 bellard
4096 aab33094 bellard
/* XXX: optimize */
4097 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
4098 aab33094 bellard
{
4099 aab33094 bellard
    uint8_t v = val;
4100 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
4101 aab33094 bellard
}
4102 aab33094 bellard
4103 733f0b02 Michael S. Tsirkin
/* Store a 16-bit value to guest physical memory, with code
   invalidation and dirty tracking on the RAM path.
   warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4138 aab33094 bellard
4139 aab33094 bellard
/* XXX: optimize */
4140 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
4141 aab33094 bellard
{
4142 aab33094 bellard
    val = tswap64(val);
4143 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4144 aab33094 bellard
}
4145 aab33094 bellard
4146 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
4147 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4148 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
4149 13eb76e0 bellard
{
4150 13eb76e0 bellard
    int l;
4151 c227f099 Anthony Liguori
    target_phys_addr_t phys_addr;
4152 9b3c35e0 j_mayer
    target_ulong page;
4153 13eb76e0 bellard
4154 13eb76e0 bellard
    while (len > 0) {
4155 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
4156 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
4157 13eb76e0 bellard
        /* if no physical page mapped, return an error */
4158 13eb76e0 bellard
        if (phys_addr == -1)
4159 13eb76e0 bellard
            return -1;
4160 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
4161 13eb76e0 bellard
        if (l > len)
4162 13eb76e0 bellard
            l = len;
4163 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
4164 5e2972fd aliguori
        if (is_write)
4165 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
4166 5e2972fd aliguori
        else
4167 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4168 13eb76e0 bellard
        len -= l;
4169 13eb76e0 bellard
        buf += l;
4170 13eb76e0 bellard
        addr += l;
4171 13eb76e0 bellard
    }
4172 13eb76e0 bellard
    return 0;
4173 13eb76e0 bellard
}
4174 a68fe89c Paul Brook
#endif
4175 13eb76e0 bellard
4176 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called from an I/O memory callback when icount mode discovers that an
   I/O instruction is not the last one in its TB: retranslate the block
   so it ends exactly on that instruction, then restart execution.
   RETADDR is the host return address inside the generated code. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Map the host code address back to the TB that contains it. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  retaddr);
    }
    /* Instruction budget at TB entry: what is left now plus what this
       TB was charged for. */
    n = env->icount_decr.u16.low + tb->icount;
    /* Restore guest CPU state to the faulting instruction; this also
       rewinds icount_decr, which the next line relies on. */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Step back to the branch and refund one instruction of budget. */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        /* Same idea for SH4 delay slots: back up to the branch. */
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* New TB runs exactly n instructions; CF_LAST_IO marks that the
       final one may perform I/O. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    /* Drop the old TB before generating the replacement. */
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    /* Does not return: longjmps back into the main execution loop. */
    cpu_resume_from_signal(env, NULL);
}
4234 2e70f6ef pbrook
4235 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
4236 b3755a91 Paul Brook
4237 055403b2 Stefan Weil
/* Print translation-cache statistics (generated code size, TB counts,
   cross-page blocks, direct-jump chaining) through the supplied
   fprintf-like callback, then append the TCG backend's own stats. */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i;
    int target_code_size = 0;
    int max_target_code_size = 0;
    int direct_jmp_count = 0;
    int direct_jmp2_count = 0;
    int cross_page = 0;

    /* Walk every live TB and accumulate the statistics. */
    for (i = 0; i < nb_tbs; i++) {
        TranslationBlock *tb = &tbs[i];

        target_code_size += tb->size;
        if (max_target_code_size < tb->size) {
            max_target_code_size = tb->size;
        }
        /* A valid second physical page means the TB straddles a page. */
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        /* 0xffff marks "no direct jump" for that TB exit. */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
4288 e3db7226 bellard
4289 61382a50 bellard
/* Instantiate the "_cmmu" variants of the softmmu load helpers, used by
   the translator to fetch guest code (as opposed to data) through the
   software MMU.  softmmu_template.h is a textual template: each
   inclusion with a different SHIFT emits the accessor for one operand
   size (1 << SHIFT bytes). */
#define MMUSUFFIX _cmmu
/* Code fetches have no meaningful host return address for unwinding. */
#define GETPC() NULL
/* The template expects an `env` identifier; route it to the global
   current-CPU pointer for these helpers. */
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif /* !CONFIG_USER_ONLY */