Statistics
| Branch: | Revision:

root / exec.c @ e890261f

History | View | Annotate | Download (122.3 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
#include <stdlib.h>
27 54936004 bellard
#include <stdio.h>
28 54936004 bellard
#include <stdarg.h>
29 54936004 bellard
#include <string.h>
30 54936004 bellard
#include <errno.h>
31 54936004 bellard
#include <unistd.h>
32 54936004 bellard
#include <inttypes.h>
33 54936004 bellard
34 6180a181 bellard
#include "cpu.h"
35 6180a181 bellard
#include "exec-all.h"
36 ca10f867 aurel32
#include "qemu-common.h"
37 b67d9a52 bellard
#include "tcg.h"
38 b3c7724c pbrook
#include "hw/hw.h"
39 cc9e98cb Alex Williamson
#include "hw/qdev.h"
40 74576198 aliguori
#include "osdep.h"
41 7ba1e619 aliguori
#include "kvm.h"
42 29e922b6 Blue Swirl
#include "qemu-timer.h"
43 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
44 53a5960a pbrook
#include <qemu.h>
45 fd052bf6 Riku Voipio
#include <signal.h>
46 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
47 f01576f1 Juergen Lock
#include <sys/param.h>
48 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
49 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
50 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
51 f01576f1 Juergen Lock
#include <sys/time.h>
52 f01576f1 Juergen Lock
#include <sys/proc.h>
53 f01576f1 Juergen Lock
#include <machine/profile.h>
54 f01576f1 Juergen Lock
#define _KERNEL
55 f01576f1 Juergen Lock
#include <sys/user.h>
56 f01576f1 Juergen Lock
#undef _KERNEL
57 f01576f1 Juergen Lock
#undef sigqueue
58 f01576f1 Juergen Lock
#include <libutil.h>
59 f01576f1 Juergen Lock
#endif
60 f01576f1 Juergen Lock
#endif
61 53a5960a pbrook
#endif
62 54936004 bellard
63 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
64 66e85a21 bellard
//#define DEBUG_FLUSH
65 9fa3e853 bellard
//#define DEBUG_TLB
66 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
67 fd6ce8f6 bellard
68 fd6ce8f6 bellard
/* make various TB consistency checks */
69 5fafdf24 ths
//#define DEBUG_TB_CHECK
70 5fafdf24 ths
//#define DEBUG_TLB_CHECK
71 fd6ce8f6 bellard
72 1196be37 ths
//#define DEBUG_IOPORT
73 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
74 1196be37 ths
75 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
76 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
77 99773bd4 pbrook
#undef DEBUG_TB_CHECK
78 99773bd4 pbrook
#endif
79 99773bd4 pbrook
80 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
81 9fa3e853 bellard
82 bdaf78e0 blueswir1
static TranslationBlock *tbs;
83 24ab68ac Stefan Weil
static int code_gen_max_blocks;
84 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 bdaf78e0 blueswir1
static int nb_tbs;
86 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
87 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88 fd6ce8f6 bellard
89 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
90 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
92 d03d860b blueswir1
 section close to code segment. */
93 d03d860b blueswir1
#define code_gen_section                                \
94 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
95 d03d860b blueswir1
    __attribute__((aligned (32)))
96 f8e2af11 Stefan Weil
#elif defined(_WIN32)
97 f8e2af11 Stefan Weil
/* Maximum alignment for Win32 is 16. */
98 f8e2af11 Stefan Weil
#define code_gen_section                                \
99 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
100 d03d860b blueswir1
#else
101 d03d860b blueswir1
#define code_gen_section                                \
102 d03d860b blueswir1
    __attribute__((aligned (32)))
103 d03d860b blueswir1
#endif
104 d03d860b blueswir1
105 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
106 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
107 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
108 26a5f13b bellard
/* threshold to flush the translated code buffer */
109 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
110 24ab68ac Stefan Weil
static uint8_t *code_gen_ptr;
111 fd6ce8f6 bellard
112 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
113 9fa3e853 bellard
int phys_ram_fd;
114 74576198 aliguori
static int in_migration;
115 94a6b54f pbrook
116 f471a17e Alex Williamson
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
117 e2eef170 pbrook
#endif
118 9fa3e853 bellard
119 6a00d601 bellard
CPUState *first_cpu;
120 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
121 6a00d601 bellard
   cpu_exec() */
122 5fafdf24 ths
CPUState *cpu_single_env;
123 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
124 bf20dc07 ths
   1 = Precise instruction counting.
125 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
126 2e70f6ef pbrook
int use_icount = 0;
127 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
128 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
129 2e70f6ef pbrook
int64_t qemu_icount;
130 6a00d601 bellard
131 54936004 bellard
typedef struct PageDesc {
132 92e873b9 bellard
    /* list of TBs intersecting this ram page */
133 fd6ce8f6 bellard
    TranslationBlock *first_tb;
134 9fa3e853 bellard
    /* in order to optimize self modifying code, we count the number
135 9fa3e853 bellard
       of lookups we do to a given page to use a bitmap */
136 9fa3e853 bellard
    unsigned int code_write_count;
137 9fa3e853 bellard
    uint8_t *code_bitmap;
138 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
139 9fa3e853 bellard
    unsigned long flags;
140 9fa3e853 bellard
#endif
141 54936004 bellard
} PageDesc;
142 54936004 bellard
143 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
144 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
145 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
146 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
147 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
148 41c1b1c9 Paul Brook
#else
149 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
150 41c1b1c9 Paul Brook
#endif
151 bedb69ea j_mayer
#else
152 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
153 bedb69ea j_mayer
#endif
154 54936004 bellard
155 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
156 5cd2c5b6 Richard Henderson
#define L2_BITS 10
157 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
158 54936004 bellard
159 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
160 5cd2c5b6 Richard Henderson
#define P_L1_BITS_REM \
161 5cd2c5b6 Richard Henderson
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
162 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
163 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
164 5cd2c5b6 Richard Henderson
165 5cd2c5b6 Richard Henderson
/* Size of the L1 page table.  Avoid silly small sizes.  */
166 5cd2c5b6 Richard Henderson
#if P_L1_BITS_REM < 4
167 5cd2c5b6 Richard Henderson
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
168 5cd2c5b6 Richard Henderson
#else
169 5cd2c5b6 Richard Henderson
#define P_L1_BITS  P_L1_BITS_REM
170 5cd2c5b6 Richard Henderson
#endif
171 5cd2c5b6 Richard Henderson
172 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
173 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
174 5cd2c5b6 Richard Henderson
#else
175 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
176 5cd2c5b6 Richard Henderson
#endif
177 5cd2c5b6 Richard Henderson
178 5cd2c5b6 Richard Henderson
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
179 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
180 5cd2c5b6 Richard Henderson
181 5cd2c5b6 Richard Henderson
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
182 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
183 5cd2c5b6 Richard Henderson
184 83fb7adf bellard
unsigned long qemu_real_host_page_size;
185 83fb7adf bellard
unsigned long qemu_host_page_bits;
186 83fb7adf bellard
unsigned long qemu_host_page_size;
187 83fb7adf bellard
unsigned long qemu_host_page_mask;
188 54936004 bellard
189 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
190 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
191 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
192 54936004 bellard
193 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
194 41c1b1c9 Paul Brook
typedef struct PhysPageDesc {
195 41c1b1c9 Paul Brook
    /* offset in host memory of the page + io_index in the low bits */
196 41c1b1c9 Paul Brook
    ram_addr_t phys_offset;
197 41c1b1c9 Paul Brook
    ram_addr_t region_offset;
198 41c1b1c9 Paul Brook
} PhysPageDesc;
199 41c1b1c9 Paul Brook
200 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
201 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
202 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
203 6d9a1304 Paul Brook
204 e2eef170 pbrook
static void io_mem_init(void);
205 e2eef170 pbrook
206 33417e70 bellard
/* io memory support */
207 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
208 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
209 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
210 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
211 6658ffb8 pbrook
static int io_mem_watch;
212 6658ffb8 pbrook
#endif
213 33417e70 bellard
214 34865134 bellard
/* log support */
215 1e8b27ca Juha Riihimäki
#ifdef WIN32
216 1e8b27ca Juha Riihimรคki
static const char *logfilename = "qemu.log";
217 1e8b27ca Juha Riihimรคki
#else
218 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
219 1e8b27ca Juha Riihimรคki
#endif
220 34865134 bellard
FILE *logfile;
221 34865134 bellard
int loglevel;
222 e735b91c pbrook
static int log_append = 0;
223 34865134 bellard
224 e3db7226 bellard
/* statistics */
225 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
226 e3db7226 bellard
static int tlb_flush_count;
227 b3755a91 Paul Brook
#endif
228 e3db7226 bellard
static int tb_flush_count;
229 e3db7226 bellard
static int tb_phys_invalidate_count;
230 e3db7226 bellard
231 7cb69cae bellard
#ifdef _WIN32
232 7cb69cae bellard
static void map_exec(void *addr, long size)
233 7cb69cae bellard
{
234 7cb69cae bellard
    DWORD old_protect;
235 7cb69cae bellard
    VirtualProtect(addr, size,
236 7cb69cae bellard
                   PAGE_EXECUTE_READWRITE, &old_protect);
237 7cb69cae bellard
    
238 7cb69cae bellard
}
239 7cb69cae bellard
#else
240 7cb69cae bellard
static void map_exec(void *addr, long size)
241 7cb69cae bellard
{
242 4369415f bellard
    unsigned long start, end, page_size;
243 7cb69cae bellard
    
244 4369415f bellard
    page_size = getpagesize();
245 7cb69cae bellard
    start = (unsigned long)addr;
246 4369415f bellard
    start &= ~(page_size - 1);
247 7cb69cae bellard
    
248 7cb69cae bellard
    end = (unsigned long)addr + size;
249 4369415f bellard
    end += page_size - 1;
250 4369415f bellard
    end &= ~(page_size - 1);
251 7cb69cae bellard
    
252 7cb69cae bellard
    mprotect((void *)start, end - start,
253 7cb69cae bellard
             PROT_READ | PROT_WRITE | PROT_EXEC);
254 7cb69cae bellard
}
255 7cb69cae bellard
#endif
256 7cb69cae bellard
257 b346ff46 bellard
/* Initialize the host page size globals: qemu_real_host_page_size is
   queried from the OS, and qemu_host_page_size/bits/mask are derived
   from it (clamped so qemu_host_page_size >= TARGET_PAGE_SIZE).
   In BSD user-mode builds, additionally walk the host process's own
   memory mappings and flag them PAGE_RESERVED — presumably so the
   guest address space allocator avoids them (confirm against
   page_set_flags' callers). */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* A zero qemu_host_page_size means "not configured": default to the
       real host page size, then clamp up to the target page size. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* log2 of the (power-of-two) host page size. */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 7.0.104: enumerate mappings via libutil. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        /* End is outside the guest range: reserve up to
                           the top of the guest address space. */
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat procfs maps file. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                /* Each line starts with "start-end"; the rest is skipped. */
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
345 54936004 bellard
346 41c1b1c9 Paul Brook
/* Walk the multi-level l1_map for guest page 'index' and return the
   corresponding PageDesc.  If 'alloc' is non-zero, missing intermediate
   tables and the final PageDesc array are allocated (zeroed) on the
   way down; otherwise the walk returns NULL at the first missing
   level. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
    /* mmap with MAP_ANONYMOUS returns zero-filled pages, matching the
       zeroing behavior of qemu_mallocz in the other branch. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of L2_SIZE PageDesc entries. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
395 54936004 bellard
396 41c1b1c9 Paul Brook
/* Non-allocating lookup: return the PageDesc for guest page 'index',
   or NULL when no descriptor has been created for it yet. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
400 fd6ce8f6 bellard
401 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
402 c227f099 Anthony Liguori
/* Walk the multi-level l1_phys_map for physical page 'index' and
   return the matching PhysPageDesc.  With 'alloc' non-zero, missing
   intermediate tables are allocated zeroed, and a missing bottom-level
   array is allocated with every entry preset to IO_MEM_UNASSIGNED and
   a region_offset matching its page address; with 'alloc' zero the
   walk returns NULL at the first hole. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;  /* NOTE(review): shadows the outer 'i'; harmless here */

        if (!alloc) {
            return NULL;
        }

        /* qemu_malloc (not mallocz) is fine: every field of every entry
           is initialized explicitly below. */
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
441 92e873b9 bellard
442 c227f099 Anthony Liguori
/* Non-allocating lookup: return the PhysPageDesc for physical page
   'index', or NULL when that part of the map has not been built. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
446 92e873b9 bellard
447 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
448 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
449 3a7d929e bellard
                                    target_ulong vaddr);
450 c8a706fe pbrook
#define mmap_lock() do { } while(0)
451 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
452 9fa3e853 bellard
#endif
453 fd6ce8f6 bellard
454 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
455 4369415f bellard
456 4369415f bellard
#if defined(CONFIG_USER_ONLY)
457 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
458 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
459 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
460 4369415f bellard
#endif
461 4369415f bellard
462 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
463 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
464 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
465 4369415f bellard
#endif
466 4369415f bellard
467 8fcd3692 blueswir1
/* Allocate the translated-code buffer and the TranslationBlock array.
   'tb_size' is the requested buffer size in bytes; zero selects a
   default (DEFAULT_CODE_GEN_BUFFER_SIZE in user mode, ram_size/4 in
   system mode).  The buffer placement is host-specific: several hosts
   need it within direct-branch range of the generated code, hence the
   MAP_32BIT / MAP_FIXED address choices below.  Exits the process on
   mmap failure. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* Static buffer in the BSS: just make it executable. */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Generic host: plain allocation plus an mprotect/VirtualProtect. */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Keep headroom for one maximally-sized translation so the flush
       threshold is never overrun mid-block. */
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
560 26a5f13b bellard
561 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size.  Note the ordering: the code generator and its buffer must
   exist before page_init()/io_mem_init() and before the prologue is
   emitted. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    /* Generated code is emitted from the start of the fresh buffer. */
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
579 26a5f13b bellard
580 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
581 9656f324 pbrook
582 e59fb374 Juan Quintela
static int cpu_common_post_load(void *opaque, int version_id)
583 e7f4eff7 Juan Quintela
{
584 e7f4eff7 Juan Quintela
    CPUState *env = opaque;
585 9656f324 pbrook
586 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
587 3098dba0 aurel32
       version_id is increased. */
588 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
589 9656f324 pbrook
    tlb_flush(env, 1);
590 9656f324 pbrook
591 9656f324 pbrook
    return 0;
592 9656f324 pbrook
}
593 e7f4eff7 Juan Quintela
594 e7f4eff7 Juan Quintela
/* Migration description for the state shared by every CPU model:
   only the halted flag and pending interrupt mask are transferred. */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
606 9656f324 pbrook
#endif
607 9656f324 pbrook
608 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
609 950f1472 Glauber Costa
{
610 950f1472 Glauber Costa
    CPUState *env = first_cpu;
611 950f1472 Glauber Costa
612 950f1472 Glauber Costa
    while (env) {
613 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
614 950f1472 Glauber Costa
            break;
615 950f1472 Glauber Costa
        env = env->next_cpu;
616 950f1472 Glauber Costa
    }
617 950f1472 Glauber Costa
618 950f1472 Glauber Costa
    return env;
619 950f1472 Glauber Costa
}
620 950f1472 Glauber Costa
621 6a00d601 bellard
/* Register a newly created CPU: append it to the global CPU list,
   assign it the next free cpu_index, and hook up savevm/migration
   state where applicable. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* user-mode threads may create CPUs concurrently */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    /* walk to the tail of the list, counting existing CPUs so the
       new one gets the next index */
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    /* splice onto the tail */
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
650 fd6ce8f6 bellard
651 9fa3e853 bellard
/* Discard the per-page SMC code bitmap (if any) and restart the
   code-write counter for the page. */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    void *bitmap = p->code_bitmap;

    if (bitmap) {
        p->code_bitmap = NULL;
        qemu_free(bitmap);
    }
    p->code_write_count = 0;
}
659 9fa3e853 bellard
660 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

/* Recursive helper: walk one node of the multi-level page map.
   'level' 0 entries point at PageDesc arrays; higher levels point
   at tables of child pointers. */
static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        /* nothing was ever mapped below this slot */
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* recurse into each child table */
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}
682 5cd2c5b6 Richard Henderson
683 5cd2c5b6 Richard Henderson
static void page_flush_tb(void)
684 5cd2c5b6 Richard Henderson
{
685 5cd2c5b6 Richard Henderson
    int i;
686 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
687 5cd2c5b6 Richard Henderson
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
688 fd6ce8f6 bellard
    }
689 fd6ce8f6 bellard
}
690 fd6ce8f6 bellard
691 fd6ce8f6 bellard
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* the generator must never have written past the buffer end */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* every CPU's jump cache may reference freed TBs — clear them all */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* empty the physical hash table and detach TBs from all pages */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* recycle the whole code buffer */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
719 fd6ce8f6 bellard
720 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
721 fd6ce8f6 bellard
722 bc98a7ef j_mayer
/* Debug check: after invalidating 'address', no TB overlapping that
   target page should remain in the physical hash table. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int h;

    address &= TARGET_PAGE_MASK;
    for (h = 0; h < CODE_GEN_PHYS_HASH_SIZE; h++) {
        for (tb = tb_phys_hash[h]; tb != NULL; tb = tb->phys_hash_next) {
            /* overlap test: TB range [pc, pc+size) intersects
               [address, address+TARGET_PAGE_SIZE) */
            if (address + TARGET_PAGE_SIZE > tb->pc &&
                address < tb->pc + tb->size) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
738 fd6ce8f6 bellard
739 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
740 fd6ce8f6 bellard
static void tb_page_check(void)
741 fd6ce8f6 bellard
{
742 fd6ce8f6 bellard
    TranslationBlock *tb;
743 fd6ce8f6 bellard
    int i, flags1, flags2;
744 3b46e624 ths
745 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
746 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
747 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
748 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
749 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
750 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
751 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
752 fd6ce8f6 bellard
            }
753 fd6ce8f6 bellard
        }
754 fd6ce8f6 bellard
    }
755 fd6ce8f6 bellard
}
756 fd6ce8f6 bellard
757 fd6ce8f6 bellard
#endif
758 fd6ce8f6 bellard
759 fd6ce8f6 bellard
/* invalidate one TB */
760 fd6ce8f6 bellard
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
761 fd6ce8f6 bellard
                             int next_offset)
762 fd6ce8f6 bellard
{
763 fd6ce8f6 bellard
    TranslationBlock *tb1;
764 fd6ce8f6 bellard
    for(;;) {
765 fd6ce8f6 bellard
        tb1 = *ptb;
766 fd6ce8f6 bellard
        if (tb1 == tb) {
767 fd6ce8f6 bellard
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
768 fd6ce8f6 bellard
            break;
769 fd6ce8f6 bellard
        }
770 fd6ce8f6 bellard
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
771 fd6ce8f6 bellard
    }
772 fd6ce8f6 bellard
}
773 fd6ce8f6 bellard
774 9fa3e853 bellard
/* Unlink 'tb' from a per-page TB list.  List pointers carry the page
   index (0 or 1) of the *next* TB in their low 2 bits, so each node
   must be untagged before following it. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        /* low 2 bits: which page_next[] slot of tb1 continues the list */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            /* found it: bypass this node */
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
790 9fa3e853 bellard
791 d4e8164f bellard
/* Remove entry 'n' of 'tb' from the circular list of TBs jumping to
   the same destination.  List pointers are tagged: low 2 bits select
   the jmp_next[] slot of the pointee, value 2 marks the list head
   (jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: continue through the head pointer */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
818 d4e8164f bellard
819 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
820 d4e8164f bellard
   another TB */
821 d4e8164f bellard
static inline void tb_reset_jump(TranslationBlock *tb, int n)
822 d4e8164f bellard
{
823 d4e8164f bellard
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
824 d4e8164f bellard
}
825 d4e8164f bellard
826 41c1b1c9 Paul Brook
/* Remove 'tb' from every data structure that references it: the
   physical hash table, the per-page TB lists (except the page at
   'page_addr', whose list the caller handles), each CPU's jump cache,
   and the chained-jump lists.  'page_addr' may be -1 to unlink from
   both pages. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        /* tagged pointer walk; tag 2 marks the end of the circle */
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
881 9fa3e853 bellard
882 9fa3e853 bellard
/* Set bits [start, start + len) in the bit array 'tab'.
   Bit i lives in byte i / 8, at bit position i % 8.
   'len' is assumed to be non-negative. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    int end = start + len;

    for (bit = start; bit < end; bit++) {
        tab[bit >> 3] |= (uint8_t)(1u << (bit & 7));
    }
}
908 9fa3e853 bellard
909 9fa3e853 bellard
/* Build the SMC bitmap for a page: one bit per byte of the page,
   set where translated code was taken from.  Walks the page's tagged
   TB list (low 2 bits of each link select the page slot). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per byte of the target page, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* TB started on the previous page; only its tail is here */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
936 9fa3e853 bellard
937 2e70f6ef pbrook
/* Translate guest code at (pc, cs_base, flags) into a new TB and
   link it into the physical page structures.  If TB or buffer space
   is exhausted, everything is flushed first (which invalidates all
   previously returned TB pointers). */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills onto a second page; record it too */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
974 3b46e624 ths
975 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* only look up the currently executing TB on a real write fault */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough code writes to this page, build a bitmap so that
       future small writes can be filtered cheaply */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* tagged pointer: low 2 bits select the page slot of this TB */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1085 fd6ce8f6 bellard
1086 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for small code writes: if the page has a code bitmap and
   none of the written bits are marked as code, skip the expensive
   range invalidation entirely. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        /* shift so that bit 0 of 'b' corresponds to byte 'start' */
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1112 9fa3e853 bellard
1113 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1114 41c1b1c9 Paul Brook
/* Invalidate every TB on the page containing 'addr' (user-mode only;
   called from the write-protection fault path).  'pc'/'puc' identify
   the faulting host context for precise SMC handling. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* find the TB that was executing at the faulting host pc */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* tagged pointer: low 2 bits select the page slot */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
1172 9fa3e853 bellard
#endif
1173 fd6ce8f6 bellard
1174 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
/* 'n' is the TB's page slot (0 or 1) and is stored in the low bits of
   the per-page list link. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push onto the page's tagged TB list */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: collect the
           combined protection and strip PAGE_WRITE from each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1227 fd6ce8f6 bellard
1228 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1229 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1230 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1231 fd6ce8f6 bellard
{
1232 fd6ce8f6 bellard
    TranslationBlock *tb;
1233 fd6ce8f6 bellard
1234 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1235 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1236 d4e8164f bellard
        return NULL;
1237 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1238 fd6ce8f6 bellard
    tb->pc = pc;
1239 b448f2f3 bellard
    tb->cflags = 0;
1240 d4e8164f bellard
    return tb;
1241 d4e8164f bellard
}
1242 d4e8164f bellard
1243 2e70f6ef pbrook
/* Release a translation block.  In practice this is mostly used for
   single-use temporary TBs: only the most recently generated TB can
   actually be reclaimed; freeing anything else is silently ignored. */
void tb_free(TranslationBlock *tb)
{
    if (nb_tbs == 0 || tb != &tbs[nb_tbs - 1]) {
        return;
    }
    /* Back up the code generation pointer and drop the array slot. */
    code_gen_ptr = tb->tc_ptr;
    nb_tbs--;
}
1253 2e70f6ef pbrook
1254 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1255 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1256 41c1b1c9 Paul Brook
void tb_link_page(TranslationBlock *tb,
1257 41c1b1c9 Paul Brook
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1258 d4e8164f bellard
{
1259 9fa3e853 bellard
    unsigned int h;
1260 9fa3e853 bellard
    TranslationBlock **ptb;
1261 9fa3e853 bellard
1262 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1263 c8a706fe pbrook
       before we are done.  */
1264 c8a706fe pbrook
    mmap_lock();
1265 9fa3e853 bellard
    /* add in the physical hash table */
1266 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1267 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1268 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1269 9fa3e853 bellard
    *ptb = tb;
1270 fd6ce8f6 bellard
1271 fd6ce8f6 bellard
    /* add in the page list */
1272 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1273 9fa3e853 bellard
    if (phys_page2 != -1)
1274 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1275 9fa3e853 bellard
    else
1276 9fa3e853 bellard
        tb->page_addr[1] = -1;
1277 9fa3e853 bellard
1278 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1279 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1280 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1281 d4e8164f bellard
1282 d4e8164f bellard
    /* init original jump addresses */
1283 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1284 d4e8164f bellard
        tb_reset_jump(tb, 0);
1285 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1286 d4e8164f bellard
        tb_reset_jump(tb, 1);
1287 8a40a180 bellard
1288 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1289 8a40a180 bellard
    tb_page_check();
1290 8a40a180 bellard
#endif
1291 c8a706fe pbrook
    mmap_unlock();
1292 fd6ce8f6 bellard
}
1293 fd6ce8f6 bellard
1294 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1295 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1296 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1297 fd6ce8f6 bellard
{
1298 9fa3e853 bellard
    int m_min, m_max, m;
1299 9fa3e853 bellard
    unsigned long v;
1300 9fa3e853 bellard
    TranslationBlock *tb;
1301 a513fe19 bellard
1302 a513fe19 bellard
    if (nb_tbs <= 0)
1303 a513fe19 bellard
        return NULL;
1304 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1305 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1306 a513fe19 bellard
        return NULL;
1307 a513fe19 bellard
    /* binary search (cf Knuth) */
1308 a513fe19 bellard
    m_min = 0;
1309 a513fe19 bellard
    m_max = nb_tbs - 1;
1310 a513fe19 bellard
    while (m_min <= m_max) {
1311 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1312 a513fe19 bellard
        tb = &tbs[m];
1313 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1314 a513fe19 bellard
        if (v == tc_ptr)
1315 a513fe19 bellard
            return tb;
1316 a513fe19 bellard
        else if (tc_ptr < v) {
1317 a513fe19 bellard
            m_max = m - 1;
1318 a513fe19 bellard
        } else {
1319 a513fe19 bellard
            m_min = m + 1;
1320 a513fe19 bellard
        }
1321 5fafdf24 ths
    }
1322 a513fe19 bellard
    return &tbs[m_max];
1323 a513fe19 bellard
}
1324 7501267e bellard
1325 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Unchain the n'th outgoing jump of 'tb': remove (tb, n) from the list of
   incoming jumps of the destination TB, patch the generated code so the
   jump no longer chains, and recursively unchain the destination. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *iter, *dest, **link;
    unsigned int tag;

    iter = tb->jmp_next[n];
    if (iter == NULL) {
        return;
    }

    /* Walk the circular jump list until the head (tag value 2) to find
       the TB this jump points to. */
    for (;;) {
        tag = (long)iter & 3;
        iter = (TranslationBlock *)((long)iter & ~3);
        if (tag == 2) {
            break;
        }
        iter = iter->jmp_next[tag];
    }
    /* we are now sure now that tb jumps to 'dest' */
    dest = iter;

    /* Unlink (tb, n) from dest's jmp_first list. */
    link = &dest->jmp_first;
    for (;;) {
        iter = *link;
        tag = (long)iter & 3;
        iter = (TranslationBlock *)((long)iter & ~3);
        if (tag == n && iter == tb) {
            break;
        }
        link = &iter->jmp_next[tag];
    }
    *link = tb->jmp_next[n];
    tb->jmp_next[n] = NULL;

    /* suppress the jump to next tb in generated code */
    tb_reset_jump(tb, n);

    /* suppress jumps in the tb on which we could have jumped */
    tb_reset_jump_recursive(dest);
}
1365 ea041c0e bellard
1366 ea041c0e bellard
/* Unchain both possible outgoing jumps of 'tb'. */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    int n;

    for (n = 0; n < 2; n++) {
        tb_reset_jump_recursive2(tb, n);
    }
}
1371 ea041c0e bellard
1372 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
/* User mode: the guest virtual pc can be used directly as the
   invalidation address. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* System mode: translate pc to a ram address before invalidating the
   translated code that covers it. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
1398 d720b93d bellard
1399 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
/* Watchpoints are not supported in user mode emulation: removal is a
   no-op and insertion always fails. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1410 c527ee8f Paul Brook
#else
1411 6658ffb8 pbrook
/* Add a watchpoint.  */
1412 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1413 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1414 6658ffb8 pbrook
{
1415 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1416 c0ce998e aliguori
    CPUWatchpoint *wp;
1417 6658ffb8 pbrook
1418 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1419 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1420 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1421 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1422 b4051334 aliguori
        return -EINVAL;
1423 b4051334 aliguori
    }
1424 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1425 a1d1bb31 aliguori
1426 a1d1bb31 aliguori
    wp->vaddr = addr;
1427 b4051334 aliguori
    wp->len_mask = len_mask;
1428 a1d1bb31 aliguori
    wp->flags = flags;
1429 a1d1bb31 aliguori
1430 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1431 c0ce998e aliguori
    if (flags & BP_GDB)
1432 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1433 c0ce998e aliguori
    else
1434 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1435 6658ffb8 pbrook
1436 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1437 a1d1bb31 aliguori
1438 a1d1bb31 aliguori
    if (watchpoint)
1439 a1d1bb31 aliguori
        *watchpoint = wp;
1440 a1d1bb31 aliguori
    return 0;
1441 6658ffb8 pbrook
}
1442 6658ffb8 pbrook
1443 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1444 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1445 a1d1bb31 aliguori
                          int flags)
1446 6658ffb8 pbrook
{
1447 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1448 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1449 6658ffb8 pbrook
1450 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1451 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1452 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1453 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1454 6658ffb8 pbrook
            return 0;
1455 6658ffb8 pbrook
        }
1456 6658ffb8 pbrook
    }
1457 a1d1bb31 aliguori
    return -ENOENT;
1458 6658ffb8 pbrook
}
1459 6658ffb8 pbrook
1460 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1461 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1462 a1d1bb31 aliguori
{
1463 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1464 7d03f82f edgar_igl
1465 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1466 a1d1bb31 aliguori
1467 a1d1bb31 aliguori
    qemu_free(watchpoint);
1468 a1d1bb31 aliguori
}
1469 a1d1bb31 aliguori
1470 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1471 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1472 a1d1bb31 aliguori
{
1473 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1474 a1d1bb31 aliguori
1475 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1476 a1d1bb31 aliguori
        if (wp->flags & mask)
1477 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1478 c0ce998e aliguori
    }
1479 7d03f82f edgar_igl
}
1480 c527ee8f Paul Brook
#endif
1481 7d03f82f edgar_igl
1482 a1d1bb31 aliguori
/* Add a breakpoint.  */
1483 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1484 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1485 4c3a88a2 bellard
{
1486 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1487 c0ce998e aliguori
    CPUBreakpoint *bp;
1488 3b46e624 ths
1489 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1490 4c3a88a2 bellard
1491 a1d1bb31 aliguori
    bp->pc = pc;
1492 a1d1bb31 aliguori
    bp->flags = flags;
1493 a1d1bb31 aliguori
1494 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1495 c0ce998e aliguori
    if (flags & BP_GDB)
1496 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1497 c0ce998e aliguori
    else
1498 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1499 3b46e624 ths
1500 d720b93d bellard
    breakpoint_invalidate(env, pc);
1501 a1d1bb31 aliguori
1502 a1d1bb31 aliguori
    if (breakpoint)
1503 a1d1bb31 aliguori
        *breakpoint = bp;
1504 4c3a88a2 bellard
    return 0;
1505 4c3a88a2 bellard
#else
1506 a1d1bb31 aliguori
    return -ENOSYS;
1507 4c3a88a2 bellard
#endif
1508 4c3a88a2 bellard
}
1509 4c3a88a2 bellard
1510 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1511 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1512 a1d1bb31 aliguori
{
1513 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1514 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1515 a1d1bb31 aliguori
1516 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1517 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1518 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1519 a1d1bb31 aliguori
            return 0;
1520 a1d1bb31 aliguori
        }
1521 7d03f82f edgar_igl
    }
1522 a1d1bb31 aliguori
    return -ENOENT;
1523 a1d1bb31 aliguori
#else
1524 a1d1bb31 aliguori
    return -ENOSYS;
1525 7d03f82f edgar_igl
#endif
1526 7d03f82f edgar_igl
}
1527 7d03f82f edgar_igl
1528 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1529 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1530 4c3a88a2 bellard
{
1531 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1532 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1533 d720b93d bellard
1534 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1535 a1d1bb31 aliguori
1536 a1d1bb31 aliguori
    qemu_free(breakpoint);
1537 a1d1bb31 aliguori
#endif
1538 a1d1bb31 aliguori
}
1539 a1d1bb31 aliguori
1540 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1541 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1542 a1d1bb31 aliguori
{
1543 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1544 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1545 a1d1bb31 aliguori
1546 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1547 a1d1bb31 aliguori
        if (bp->flags & mask)
1548 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1549 c0ce998e aliguori
    }
1550 4c3a88a2 bellard
#endif
1551 4c3a88a2 bellard
}
1552 4c3a88a2 bellard
1553 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1554 c33a346e bellard
   CPU loop after each instruction */
1555 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1556 c33a346e bellard
{
1557 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1558 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1559 c33a346e bellard
        env->singlestep_enabled = enabled;
1560 e22a25c9 aliguori
        if (kvm_enabled())
1561 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1562 e22a25c9 aliguori
        else {
1563 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1564 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1565 e22a25c9 aliguori
            tb_flush(env);
1566 e22a25c9 aliguori
        }
1567 c33a346e bellard
    }
1568 c33a346e bellard
#endif
1569 c33a346e bellard
}
1570 c33a346e bellard
1571 34865134 bellard
/* enable or disable low levels log */
1572 34865134 bellard
void cpu_set_log(int log_flags)
1573 34865134 bellard
{
1574 34865134 bellard
    loglevel = log_flags;
1575 34865134 bellard
    if (loglevel && !logfile) {
1576 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1577 34865134 bellard
        if (!logfile) {
1578 34865134 bellard
            perror(logfilename);
1579 34865134 bellard
            _exit(1);
1580 34865134 bellard
        }
1581 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1582 9fa3e853 bellard
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1583 9fa3e853 bellard
        {
1584 b55266b5 blueswir1
            static char logfile_buf[4096];
1585 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1586 9fa3e853 bellard
        }
1587 bf65f53f Filip Navara
#elif !defined(_WIN32)
1588 bf65f53f Filip Navara
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1589 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1590 9fa3e853 bellard
#endif
1591 e735b91c pbrook
        log_append = 1;
1592 e735b91c pbrook
    }
1593 e735b91c pbrook
    if (!loglevel && logfile) {
1594 e735b91c pbrook
        fclose(logfile);
1595 e735b91c pbrook
        logfile = NULL;
1596 34865134 bellard
    }
1597 34865134 bellard
}
1598 34865134 bellard
1599 34865134 bellard
void cpu_set_log_filename(const char *filename)
1600 34865134 bellard
{
1601 34865134 bellard
    logfilename = strdup(filename);
1602 e735b91c pbrook
    if (logfile) {
1603 e735b91c pbrook
        fclose(logfile);
1604 e735b91c pbrook
        logfile = NULL;
1605 e735b91c pbrook
    }
1606 e735b91c pbrook
    cpu_set_log(loglevel);
1607 34865134 bellard
}
1608 c33a346e bellard
1609 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb != NULL) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1628 3098dba0 aurel32
1629 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1630 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1631 3098dba0 aurel32
{
1632 3098dba0 aurel32
    int old_mask;
1633 be214e6c aurel32
1634 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1635 68a79315 bellard
    env->interrupt_request |= mask;
1636 3098dba0 aurel32
1637 8edac960 aliguori
#ifndef CONFIG_USER_ONLY
1638 8edac960 aliguori
    /*
1639 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1640 8edac960 aliguori
     * case its halted.
1641 8edac960 aliguori
     */
1642 8edac960 aliguori
    if (!qemu_cpu_self(env)) {
1643 8edac960 aliguori
        qemu_cpu_kick(env);
1644 8edac960 aliguori
        return;
1645 8edac960 aliguori
    }
1646 8edac960 aliguori
#endif
1647 8edac960 aliguori
1648 2e70f6ef pbrook
    if (use_icount) {
1649 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1650 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1651 2e70f6ef pbrook
        if (!can_do_io(env)
1652 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1653 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1654 2e70f6ef pbrook
        }
1655 2e70f6ef pbrook
#endif
1656 2e70f6ef pbrook
    } else {
1657 3098dba0 aurel32
        cpu_unlink_tb(env);
1658 ea041c0e bellard
    }
1659 ea041c0e bellard
}
1660 ea041c0e bellard
1661 b54ad049 bellard
/* Withdraw the given pending-interrupt bits. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1665 b54ad049 bellard
1666 3098dba0 aurel32
/* Ask the CPU loop to stop, then unchain the running TB so the request
   is noticed promptly. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1671 3098dba0 aurel32
1672 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1673 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1674 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1675 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1676 f193c797 bellard
      "show target assembly code for each compiled TB" },
1677 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1678 57fec1fe bellard
      "show micro ops for each compiled TB" },
1679 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1680 e01a1157 blueswir1
      "show micro ops "
1681 e01a1157 blueswir1
#ifdef TARGET_I386
1682 e01a1157 blueswir1
      "before eflags optimization and "
1683 f193c797 bellard
#endif
1684 e01a1157 blueswir1
      "after liveness analysis" },
1685 f193c797 bellard
    { CPU_LOG_INT, "int",
1686 f193c797 bellard
      "show interrupts/exceptions in short format" },
1687 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1688 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1689 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1690 e91c8a77 ths
      "show CPU state before block translation" },
1691 f193c797 bellard
#ifdef TARGET_I386
1692 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1693 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1694 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1695 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1696 f193c797 bellard
#endif
1697 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1698 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1699 fd872598 bellard
      "show all i/o ports accesses" },
1700 8e3a9fd2 bellard
#endif
1701 f193c797 bellard
    { 0, NULL, NULL },
1702 f193c797 bellard
};
1703 f193c797 bellard
1704 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1705 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1706 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1707 f6f3fbca Michael S. Tsirkin
1708 f6f3fbca Michael S. Tsirkin
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1709 9742bf26 Yoshiaki Tamura
                                  ram_addr_t size,
1710 9742bf26 Yoshiaki Tamura
                                  ram_addr_t phys_offset)
1711 f6f3fbca Michael S. Tsirkin
{
1712 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1713 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1714 f6f3fbca Michael S. Tsirkin
        client->set_memory(client, start_addr, size, phys_offset);
1715 f6f3fbca Michael S. Tsirkin
    }
1716 f6f3fbca Michael S. Tsirkin
}
1717 f6f3fbca Michael S. Tsirkin
1718 f6f3fbca Michael S. Tsirkin
/* Ask every registered client to sync its dirty bitmap for [start, end).
   Returns 0 on success, or the first client's negative error code. */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        int ret = client->sync_dirty_bitmap(client, start, end);

        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}
1729 f6f3fbca Michael S. Tsirkin
1730 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1731 f6f3fbca Michael S. Tsirkin
{
1732 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1733 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1734 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1735 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1736 f6f3fbca Michael S. Tsirkin
            return r;
1737 f6f3fbca Michael S. Tsirkin
    }
1738 f6f3fbca Michael S. Tsirkin
    return 0;
1739 f6f3fbca Michael S. Tsirkin
}
1740 f6f3fbca Michael S. Tsirkin
1741 5cd2c5b6 Richard Henderson
/* Recursively walk one subtree of the multi-level physical page table,
   reporting every mapped page to 'client'. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        /* Leaf level: notify the client about each mapped page. */
        PhysPageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset == IO_MEM_UNASSIGNED) {
                continue;
            }
            client->set_memory(client, pd[i].region_offset,
                               TARGET_PAGE_SIZE, pd[i].phys_offset);
        }
    } else {
        /* Interior level: descend into each child table. */
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1764 f6f3fbca Michael S. Tsirkin
1765 f6f3fbca Michael S. Tsirkin
/* Walk the entire physical page table and report every mapped page to
 * 'client' via phys_page_for_each_1().
 *
 * Bug fix: the recursion previously started from 'l1_phys_map + 1' on
 * every iteration, ignoring the loop index entirely — L1 slot 0 was
 * never visited and slot 1 was visited P_L1_SIZE times.  Each iteration
 * must descend into its own L1 slot, 'l1_phys_map + i'.
 */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}
1773 f6f3fbca Michael S. Tsirkin
1774 f6f3fbca Michael S. Tsirkin
/* Register a memory client and immediately replay the current memory
   map to it so it starts out in sync. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1779 f6f3fbca Michael S. Tsirkin
1780 f6f3fbca Michael S. Tsirkin
/* Drop a client from the memory-notification list. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1784 f6f3fbca Michael S. Tsirkin
#endif
1785 f6f3fbca Michael S. Tsirkin
1786 f193c797 bellard
/* Return non-zero iff s2 is exactly n characters long and matches the
   first n characters of s1. */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1792 3b46e624 ths
1793 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
1794 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1795 f193c797 bellard
{
1796 c7cd6a37 blueswir1
    const CPULogItem *item;
1797 f193c797 bellard
    int mask;
1798 f193c797 bellard
    const char *p, *p1;
1799 f193c797 bellard
1800 f193c797 bellard
    p = str;
1801 f193c797 bellard
    mask = 0;
1802 f193c797 bellard
    for(;;) {
1803 f193c797 bellard
        p1 = strchr(p, ',');
1804 f193c797 bellard
        if (!p1)
1805 f193c797 bellard
            p1 = p + strlen(p);
1806 9742bf26 Yoshiaki Tamura
        if(cmp1(p,p1-p,"all")) {
1807 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1808 9742bf26 Yoshiaki Tamura
                mask |= item->mask;
1809 9742bf26 Yoshiaki Tamura
            }
1810 9742bf26 Yoshiaki Tamura
        } else {
1811 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1812 9742bf26 Yoshiaki Tamura
                if (cmp1(p, p1 - p, item->name))
1813 9742bf26 Yoshiaki Tamura
                    goto found;
1814 9742bf26 Yoshiaki Tamura
            }
1815 9742bf26 Yoshiaki Tamura
            return 0;
1816 f193c797 bellard
        }
1817 f193c797 bellard
    found:
1818 f193c797 bellard
        mask |= item->mask;
1819 f193c797 bellard
        if (*p1 != ',')
1820 f193c797 bellard
            break;
1821 f193c797 bellard
        p = p1 + 1;
1822 f193c797 bellard
    }
1823 f193c797 bellard
    return mask;
1824 f193c797 bellard
}
1825 ea041c0e bellard
1826 7501267e bellard
/* Print a fatal error message (printf-style) plus the CPU state to
   stderr and the qemu log, then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list args;
    va_list args_copy;

    va_start(args, fmt);
    va_copy(args_copy, args);

    /* Report to stderr first... */
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, args);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif

    /* ...then mirror everything into the qemu log, if enabled. */
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, args_copy);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }

    va_end(args_copy);
    va_end(args);

#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT handler so abort() terminates the
           process instead of entering a guest signal handler. */
        struct sigaction act;

        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1865 7501267e bellard
1866 c5be9f08 ths
/* Create a copy of a CPU state (used by user-mode fork emulation).
 *
 * The new CPU is allocated via cpu_init(), then overwritten wholesale
 * with memcpy(); chaining pointers and the cpu index are preserved, and
 * break/watchpoints are cloned into the new CPU.
 *
 * Fix vs. original: QTAILQ_INIT() was applied to env's (the source
 * CPU's) lists, which emptied them immediately before the FOREACH that
 * should clone them — so nothing was ever cloned and the source CPU
 * lost its break/watchpoints.  The memcpy clobbers new_env's list
 * heads, so it is new_env's lists that must be re-initialized; env's
 * lists are left intact and iterated.
 */
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1899 c5be9f08 ths
1900 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1901 0124311e bellard
1902 5c751e99 edgar_igl
/* Drop tb_jmp_cache entries that may reference TBs overlapping the
   flushed page.  A TB can start on the preceding page and spill into
   'addr', so both the previous page's hash bucket range and the page's
   own range are cleared. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int bucket;

    bucket = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[bucket], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    bucket = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[bucket], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1916 5c751e99 edgar_igl
1917 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1918 08738984 Igor Kovalenko
    .addr_read  = -1,
1919 08738984 Igor Kovalenko
    .addr_write = -1,
1920 08738984 Igor Kovalenko
    .addr_code  = -1,
1921 08738984 Igor Kovalenko
    .addend     = -1,
1922 08738984 Igor Kovalenko
};
1923 08738984 Igor Kovalenko
1924 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1925 ee8b7021 bellard
   implemented yet) */
1926 ee8b7021 bellard
/* Invalidate the entire TLB and the TB jump cache.
   'flush_global' is accepted for API symmetry but global entries are
   not implemented yet, so it is ignored. */
void tlb_flush(CPUState *env, int flush_global)
{
    int entry;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (entry = 0; entry < CPU_TLB_SIZE; entry++) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][entry] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    /* Forget any tracked large-page region as well. */
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
1950 33417e70 bellard
1951 274da6b2 bellard
/* Invalidate a single TLB entry if any of its three tags (read, write,
   code) matches 'addr' (page-aligned, TLB_INVALID_MASK included in the
   comparison mask so already-invalid entries never match). */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    const target_ulong tag_mask = TARGET_PAGE_MASK | TLB_INVALID_MASK;

    if (addr == (tlb_entry->addr_read & tag_mask) ||
        addr == (tlb_entry->addr_write & tag_mask) ||
        addr == (tlb_entry->addr_code & tag_mask)) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1962 61382a50 bellard
1963 2e12669a bellard
/* Invalidate all TLB entries (in every MMU mode) for the page
   containing 'addr', plus the related TB jump-cache buckets.  Falls
   back to a full flush when the page lies inside a tracked large-page
   region. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int slot;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    slot = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][slot], addr);
    }

    tlb_flush_jmp_cache(env, addr);
}
1992 9fa3e853 bellard
1993 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1994 9fa3e853 bellard
   can be detected */
1995 c227f099 Anthony Liguori
/* Make the TLBs notice writes to code in the page at 'ram_addr' by
   clearing its CODE_DIRTY_FLAG, so self-modifying code is detected. */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2001 9fa3e853 bellard
2002 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
2003 3a7d929e bellard
   tested for self modifying code */
2004 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2005 3a7d929e bellard
                                    target_ulong vaddr)
2006 9fa3e853 bellard
{
2007 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2008 1ccde1cb bellard
}
2009 1ccde1cb bellard
2010 5fafdf24 ths
/* If this TLB entry maps writable RAM whose host address falls inside
   [start, start + length), mark its write tag TLB_NOTDIRTY so the next
   write goes through the slow path and re-dirties the page. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        unsigned long host_addr =
            (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((host_addr - start) < length) {
            tlb_entry->addr_write =
                (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
2021 1ccde1cb bellard
2022 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
2023 c227f099 Anthony Liguori
/* Clear 'dirty_flags' for the RAM range [start, end) and patch every
   CPU's TLB so the next write to the range re-dirties it.
   Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0) {
        return;
    }
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for (i = 0; i < CPU_TLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
            }
        }
    }
}
2057 1ccde1cb bellard
2058 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2059 74576198 aliguori
{
2060 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2061 74576198 aliguori
    in_migration = enable;
2062 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2063 f6f3fbca Michael S. Tsirkin
    return ret;
2064 74576198 aliguori
}
2065 74576198 aliguori
2066 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
2067 74576198 aliguori
{
2068 74576198 aliguori
    return in_migration;
2069 74576198 aliguori
}
2070 74576198 aliguori
2071 c227f099 Anthony Liguori
/* Ask registered clients (e.g. kvm) to synchronize their dirty bitmap
   for [start_addr, end_addr].  Returns the notification result. */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    return cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
}
2079 2bec46dc aliguori
2080 3a7d929e bellard
/* Re-derive the TLB_NOTDIRTY bit of one TLB entry from the current
   dirty bitmap: entries mapping clean RAM pages get TLB_NOTDIRTY so
   writes take the slow path. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        void *host = (void *)(unsigned long)
            ((tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend);
        ram_addr_t ram_addr = qemu_ram_addr_from_host_nofail(host);

        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2094 3a7d929e bellard
2095 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2096 3a7d929e bellard
/* Refresh the dirty state of every TLB entry in every MMU mode from
   the current dirty bitmap. */
void cpu_tlb_update_dirty(CPUState *env)
{
    int mmu_idx;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
        }
    }
}
2105 3a7d929e bellard
2106 0f459d16 pbrook
/* Clear TLB_NOTDIRTY on one entry if its write tag matches 'vaddr',
   making subsequent writes fast-path again. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
2111 1ccde1cb bellard
2112 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2113 0f459d16 pbrook
   so that it is no longer dirty */
2114 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int mmu_idx;
    int slot;

    vaddr &= TARGET_PAGE_MASK;
    slot = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][slot], vaddr);
    }
}
2124 9fa3e853 bellard
2125 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
2126 d4c430a8 Paul Brook
   large pages and trigger a full TLB flush if these are invalidated.  */
2127 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No region tracked yet: start one covering just this page. */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2147 d4c430a8 Paul Brook
2148 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
2149 d4c430a8 Paul Brook
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2150 d4c430a8 Paul Brook
   supplied size is only used by tlb_flush_page.  */
2151 d4c430a8 Paul Brook
void tlb_set_page(CPUState *env, target_ulong vaddr,
2152 d4c430a8 Paul Brook
                  target_phys_addr_t paddr, int prot,
2153 d4c430a8 Paul Brook
                  int mmu_idx, target_ulong size)
2154 9fa3e853 bellard
{
2155 92e873b9 bellard
    PhysPageDesc *p;
2156 4f2ac237 bellard
    unsigned long pd;
2157 9fa3e853 bellard
    unsigned int index;
2158 4f2ac237 bellard
    target_ulong address;
2159 0f459d16 pbrook
    target_ulong code_address;
2160 355b1943 Paul Brook
    unsigned long addend;
2161 84b7b8e7 bellard
    CPUTLBEntry *te;
2162 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2163 c227f099 Anthony Liguori
    target_phys_addr_t iotlb;
2164 9fa3e853 bellard
2165 d4c430a8 Paul Brook
    assert(size >= TARGET_PAGE_SIZE);
2166 d4c430a8 Paul Brook
    if (size != TARGET_PAGE_SIZE) {
2167 d4c430a8 Paul Brook
        tlb_add_large_page(env, vaddr, size);
2168 d4c430a8 Paul Brook
    }
2169 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2170 9fa3e853 bellard
    if (!p) {
2171 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
2172 9fa3e853 bellard
    } else {
2173 9fa3e853 bellard
        pd = p->phys_offset;
2174 9fa3e853 bellard
    }
2175 9fa3e853 bellard
#if defined(DEBUG_TLB)
2176 7fd3f494 Stefan Weil
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2177 7fd3f494 Stefan Weil
           " prot=%x idx=%d pd=0x%08lx\n",
2178 7fd3f494 Stefan Weil
           vaddr, paddr, prot, mmu_idx, pd);
2179 9fa3e853 bellard
#endif
2180 9fa3e853 bellard
2181 0f459d16 pbrook
    address = vaddr;
2182 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2183 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2184 0f459d16 pbrook
        address |= TLB_MMIO;
2185 0f459d16 pbrook
    }
2186 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2187 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2188 0f459d16 pbrook
        /* Normal RAM.  */
2189 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2190 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2191 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
2192 0f459d16 pbrook
        else
2193 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
2194 0f459d16 pbrook
    } else {
2195 ccbb4d44 Stuart Brady
        /* IO handlers are currently passed a physical address.
2196 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2197 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2198 0f459d16 pbrook
           and avoid full address decoding in every device.
2199 0f459d16 pbrook
           We can't use the high bits of pd for this because
2200 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2201 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2202 8da3ff18 pbrook
        if (p) {
2203 8da3ff18 pbrook
            iotlb += p->region_offset;
2204 8da3ff18 pbrook
        } else {
2205 8da3ff18 pbrook
            iotlb += paddr;
2206 8da3ff18 pbrook
        }
2207 0f459d16 pbrook
    }
2208 0f459d16 pbrook
2209 0f459d16 pbrook
    code_address = address;
2210 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2211 0f459d16 pbrook
       watchpoint trap routines.  */
2212 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2213 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2214 bf298f83 Jun Koi
            /* Avoid trapping reads of pages with a write breakpoint. */
2215 bf298f83 Jun Koi
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2216 bf298f83 Jun Koi
                iotlb = io_mem_watch + paddr;
2217 bf298f83 Jun Koi
                address |= TLB_MMIO;
2218 bf298f83 Jun Koi
                break;
2219 bf298f83 Jun Koi
            }
2220 6658ffb8 pbrook
        }
2221 0f459d16 pbrook
    }
2222 d79acba4 balrog
2223 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2224 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2225 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2226 0f459d16 pbrook
    te->addend = addend - vaddr;
2227 0f459d16 pbrook
    if (prot & PAGE_READ) {
2228 0f459d16 pbrook
        te->addr_read = address;
2229 0f459d16 pbrook
    } else {
2230 0f459d16 pbrook
        te->addr_read = -1;
2231 0f459d16 pbrook
    }
2232 5c751e99 edgar_igl
2233 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2234 0f459d16 pbrook
        te->addr_code = code_address;
2235 0f459d16 pbrook
    } else {
2236 0f459d16 pbrook
        te->addr_code = -1;
2237 0f459d16 pbrook
    }
2238 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2239 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2240 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2241 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2242 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2243 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2244 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2245 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2246 9fa3e853 bellard
        } else {
2247 0f459d16 pbrook
            te->addr_write = address;
2248 9fa3e853 bellard
        }
2249 0f459d16 pbrook
    } else {
2250 0f459d16 pbrook
        te->addr_write = -1;
2251 9fa3e853 bellard
    }
2252 9fa3e853 bellard
}
2253 9fa3e853 bellard
2254 0124311e bellard
#else
2255 0124311e bellard
2256 ee8b7021 bellard
/* User-mode emulation has no softmmu TLB: flushing is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2259 0124311e bellard
2260 2e12669a bellard
/* User-mode emulation has no softmmu TLB: per-page flush is a no-op. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2263 0124311e bellard
2264 edf8e2af Mika Westerberg
/*
2265 edf8e2af Mika Westerberg
 * Walks guest process memory "regions" one by one
2266 edf8e2af Mika Westerberg
 * and calls callback function 'fn' for each region.
2267 edf8e2af Mika Westerberg
 */
2268 5cd2c5b6 Richard Henderson
2269 5cd2c5b6 Richard Henderson
struct walk_memory_regions_data
2270 5cd2c5b6 Richard Henderson
{
2271 5cd2c5b6 Richard Henderson
    walk_memory_regions_fn fn;
2272 5cd2c5b6 Richard Henderson
    void *priv;
2273 5cd2c5b6 Richard Henderson
    unsigned long start;
2274 5cd2c5b6 Richard Henderson
    int prot;
2275 5cd2c5b6 Richard Henderson
};
2276 5cd2c5b6 Richard Henderson
2277 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2278 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2279 5cd2c5b6 Richard Henderson
{
2280 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2281 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2282 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2283 5cd2c5b6 Richard Henderson
            return rc;
2284 5cd2c5b6 Richard Henderson
        }
2285 5cd2c5b6 Richard Henderson
    }
2286 5cd2c5b6 Richard Henderson
2287 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2288 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2289 5cd2c5b6 Richard Henderson
2290 5cd2c5b6 Richard Henderson
    return 0;
2291 5cd2c5b6 Richard Henderson
}
2292 5cd2c5b6 Richard Henderson
2293 5cd2c5b6 Richard Henderson
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2294 b480d9b7 Paul Brook
                                 abi_ulong base, int level, void **lp)
2295 5cd2c5b6 Richard Henderson
{
2296 b480d9b7 Paul Brook
    abi_ulong pa;
2297 5cd2c5b6 Richard Henderson
    int i, rc;
2298 5cd2c5b6 Richard Henderson
2299 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
2300 5cd2c5b6 Richard Henderson
        return walk_memory_regions_end(data, base, 0);
2301 5cd2c5b6 Richard Henderson
    }
2302 5cd2c5b6 Richard Henderson
2303 5cd2c5b6 Richard Henderson
    if (level == 0) {
2304 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
2305 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2306 5cd2c5b6 Richard Henderson
            int prot = pd[i].flags;
2307 5cd2c5b6 Richard Henderson
2308 5cd2c5b6 Richard Henderson
            pa = base | (i << TARGET_PAGE_BITS);
2309 5cd2c5b6 Richard Henderson
            if (prot != data->prot) {
2310 5cd2c5b6 Richard Henderson
                rc = walk_memory_regions_end(data, pa, prot);
2311 5cd2c5b6 Richard Henderson
                if (rc != 0) {
2312 5cd2c5b6 Richard Henderson
                    return rc;
2313 9fa3e853 bellard
                }
2314 9fa3e853 bellard
            }
2315 5cd2c5b6 Richard Henderson
        }
2316 5cd2c5b6 Richard Henderson
    } else {
2317 5cd2c5b6 Richard Henderson
        void **pp = *lp;
2318 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2319 b480d9b7 Paul Brook
            pa = base | ((abi_ulong)i <<
2320 b480d9b7 Paul Brook
                (TARGET_PAGE_BITS + L2_BITS * level));
2321 5cd2c5b6 Richard Henderson
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2322 5cd2c5b6 Richard Henderson
            if (rc != 0) {
2323 5cd2c5b6 Richard Henderson
                return rc;
2324 5cd2c5b6 Richard Henderson
            }
2325 5cd2c5b6 Richard Henderson
        }
2326 5cd2c5b6 Richard Henderson
    }
2327 5cd2c5b6 Richard Henderson
2328 5cd2c5b6 Richard Henderson
    return 0;
2329 5cd2c5b6 Richard Henderson
}
2330 5cd2c5b6 Richard Henderson
2331 5cd2c5b6 Richard Henderson
/* Walk all mapped guest memory "regions" (maximal runs of pages with
   identical protection) and call 'fn(priv, start, end, prot)' for each.
   Stops early and returns fn's result if it is nonzero; else 0. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data = {
        .fn = fn,
        .priv = priv,
        .start = -1ul,   /* no run open yet */
        .prot = 0,
    };
    unsigned long i;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open run, if any. */
    return walk_memory_regions_end(&data, 0, 0);
}
2351 edf8e2af Mika Westerberg
2352 b480d9b7 Paul Brook
/* walk_memory_regions() callback: print one region as
   "start-end size rwx" to the FILE passed via 'priv'. */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
2366 edf8e2af Mika Westerberg
2367 edf8e2af Mika Westerberg
/* dump memory mappings */
2368 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2369 edf8e2af Mika Westerberg
{
2370 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2371 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2372 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2373 33417e70 bellard
}
2374 33417e70 bellard
2375 53a5960a pbrook
/* Return the PAGE_* flags of the page containing 'address', or 0 when
   the page has no descriptor (unmapped). */
int page_get_flags(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);

    return p ? p->flags : 0;
}
2384 9fa3e853 bellard
2385 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
2386 376a7909 Richard Henderson
   The flag PAGE_WRITE_ORG is positioned automatically depending
2387 376a7909 Richard Henderson
   on PAGE_WRITE.  The mmap_lock should already be held.  */
2388 53a5960a pbrook
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, remaining;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, remaining = end - start;
         remaining != 0;
         remaining -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* A page becoming writable that holds translated code must have
           that code invalidated first. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2422 33417e70 bellard
2423 3d97b40b ths
/* Check that every page of [start, start+len) is valid and grants the
 * accesses requested in 'flags' (PAGE_READ / PAGE_WRITE).  Pages that
 * were write-protected only to guard translated code are unprotected
 * on demand.  Returns 0 on success, -1 on failure.
 *
 * Fix vs. original: the write-access branch ended with 'return 0;'
 * inside the per-page loop, so a multi-page range was approved after
 * validating only its first page.  The early return is removed so
 * every page in the range is checked.
 */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
2472 3d97b40b ths
2473 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled.
   'pc' and 'puc' are forwarded to tb_invalidate_phys_page() for the
   invalidation of any translated block at 'address'.  */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        /* No descriptor for this page: the fault was not ours.  */
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* Operate on the whole host page: when the host page size is
           larger than the target page size, several target pages share
           the single mprotect() below.  */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            /* Accumulate the flags of every target page in this host
               page so the mprotect() honours all of them.  */
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2520 9fa3e853 bellard
2521 6a00d601 bellard
/* No-op stub: this is the CONFIG_USER_ONLY side of the file (see the
   #endif just below), where there is no softmmu TLB to mark dirty.  */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2525 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2526 9fa3e853 bellard
2527 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2528 8da3ff18 pbrook
2529 c04b2b78 Paul Brook
/* Offset of an address within its (target) page.  */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

/* A page whose contents are backed by more than one memory region:
   each in-page offset carries its own I/O handler index and region
   offset (arrays are indexed with SUBPAGE_IDX()).  */
typedef struct subpage_t {
    target_phys_addr_t base;                    /* guest-physical base of the page */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];  /* per-offset I/O handler index */
    ram_addr_t region_offset[TARGET_PAGE_SIZE]; /* per-offset region offset */
} subpage_t;

/* Forward declarations -- definitions appear later in this file.  */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
2541 db7b5426 blueswir1
/* Compute the in-page bounds [start_addr2, end_addr2] that the range
   [start_addr, start_addr + orig_size) covers inside the page holding
   'addr', and set need_subpage to 1 when that coverage is partial (so
   the page must be handled through a subpage_t).
   NOTE: this macro reads a variable named 'orig_size' that must be in
   scope at the expansion site (see cpu_register_physical_memory_offset),
   and it only ever sets need_subpage -- the caller must initialize it
   to 0 beforehand.  */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2560 db7b5426 blueswir1
2561 8f2498f9 Michael S. Tsirkin
/* register physical memory.
2562 8f2498f9 Michael S. Tsirkin
   For RAM, 'size' must be a multiple of the target page size.
2563 8f2498f9 Michael S. Tsirkin
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2564 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2565 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2566 ccbb4d44 Stuart Brady
   start_addr and region_offset are rounded down to a page boundary
2567 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2568 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
2569 c227f099 Anthony Liguori
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;   /* kept un-rounded for CHECK_SUBPAGE */
    subpage_t *subpage;

    /* Let interested clients (e.g. KVM) know about the new mapping.  */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    /* Round the size up to a whole number of target pages.  */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split it into a subpage
               if the new range covers it only partially.  */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    /* Convert the existing full-page mapping into a
                       subpage before registering the partial range.  */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Already a subpage: recover its descriptor.  */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM pages consume a page of backing store each.  */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Page not mapped yet: allocate a fresh descriptor.  */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    /* Partial page of I/O memory: the rest of the page
                       stays unassigned inside the new subpage.  */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2649 33417e70 bellard
2650 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2651 c227f099 Anthony Liguori
/* Look up the physical page descriptor for 'addr' and return its
   phys_offset, or IO_MEM_UNASSIGNED when no page is mapped there.  */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *desc = phys_page_find(addr >> TARGET_PAGE_BITS);

    return desc ? desc->phys_offset : IO_MEM_UNASSIGNED;
}
2660 ba863458 bellard
2661 c227f099 Anthony Liguori
/* Forward a coalesced-MMIO region registration to KVM; a no-op when
   KVM is not in use.  */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_coalesce_mmio_region(addr, size);
}
2666 f65ed4c1 aliguori
2667 c227f099 Anthony Liguori
/* Forward a coalesced-MMIO region removal to KVM; a no-op when KVM is
   not in use.  */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_uncoalesce_mmio_region(addr, size);
}
2672 f65ed4c1 aliguori
2673 62a2744c Sheng Yang
/* Flush KVM's coalesced-MMIO ring buffer; a no-op when KVM is not in
   use.  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2678 62a2744c Sheng Yang
2679 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2680 c902760f Marcelo Tosatti
2681 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2682 c902760f Marcelo Tosatti
2683 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
2684 c902760f Marcelo Tosatti
2685 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
2686 c902760f Marcelo Tosatti
{
2687 c902760f Marcelo Tosatti
    struct statfs fs;
2688 c902760f Marcelo Tosatti
    int ret;
2689 c902760f Marcelo Tosatti
2690 c902760f Marcelo Tosatti
    do {
2691 9742bf26 Yoshiaki Tamura
        ret = statfs(path, &fs);
2692 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);
2693 c902760f Marcelo Tosatti
2694 c902760f Marcelo Tosatti
    if (ret != 0) {
2695 9742bf26 Yoshiaki Tamura
        perror(path);
2696 9742bf26 Yoshiaki Tamura
        return 0;
2697 c902760f Marcelo Tosatti
    }
2698 c902760f Marcelo Tosatti
2699 c902760f Marcelo Tosatti
    if (fs.f_type != HUGETLBFS_MAGIC)
2700 9742bf26 Yoshiaki Tamura
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2701 c902760f Marcelo Tosatti
2702 c902760f Marcelo Tosatti
    return fs.f_bsize;
2703 c902760f Marcelo Tosatti
}
2704 c902760f Marcelo Tosatti
2705 04b16653 Alex Williamson
/* Allocate the host memory for 'block' (of 'memory' bytes) from a
   hugetlbfs mount at 'path'.  Returns the mmap()ed area, storing the
   backing fd in block->fd, or NULL on any failure (the caller is
   expected to fall back to a normal allocation).  */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Requests smaller than one huge page are not worth backing with
       hugetlbfs.  */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the file stays alive through 'fd' but leaves
       no name behind in the filesystem.  */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages.  */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
2773 c902760f Marcelo Tosatti
#endif
2774 c902760f Marcelo Tosatti
2775 d17b5288 Alex Williamson
/* Choose an offset in the RAM address space for a new block of 'size'
   bytes: scan the gaps between existing blocks and pick the smallest
   gap that fits (best fit).  Returns 0 when no block exists yet.
   NOTE(review): ULONG_MAX is used as the "no upper neighbour" sentinel
   and the initial mingap; if ram_addr_t is wider than unsigned long on
   some host this cap looks too small -- confirm.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        /* Find the closest block starting at or after this block's
           end; the space in between is a candidate gap.  */
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset =  end;
            mingap = next - end;
        }
    }
    return offset;
}
2801 04b16653 Alex Williamson
static ram_addr_t last_ram_offset(void)
2802 04b16653 Alex Williamson
{
2803 d17b5288 Alex Williamson
    RAMBlock *block;
2804 d17b5288 Alex Williamson
    ram_addr_t last = 0;
2805 d17b5288 Alex Williamson
2806 d17b5288 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next)
2807 d17b5288 Alex Williamson
        last = MAX(last, block->offset + block->length);
2808 d17b5288 Alex Williamson
2809 d17b5288 Alex Williamson
    return last;
2810 d17b5288 Alex Williamson
}
2811 d17b5288 Alex Williamson
2812 84b89d78 Cam Macdonell
/* Register a new RAM block of 'size' bytes named 'name' (prefixed with
   the device path of 'dev' when available).  When 'host' is non-NULL it
   is used as the backing memory; otherwise memory is allocated here
   (from hugetlbfs when -mem-path is set, with a qemu_vmalloc fallback).
   Aborts if a block with the same id already exists.  Returns the new
   block's offset in the RAM address space.  */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Build the block id as "<device-path>/<name>" when a device path
       is available, else just "<name>".  */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Duplicate ids would break migration/lookup: refuse them hard.  */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        /* Caller supplied the backing memory.  */
        new_block->host = host;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* hugetlbfs allocation failed: fall back to a normal
                   allocation.  */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the whole new block dirty.  */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2879 e9a1ab19 bellard
2880 6977dfe6 Yoshiaki Tamura
/* Allocate a new RAM block, letting QEMU allocate the host memory
   itself (no caller-provided buffer).  Returns the block's offset.  */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
2884 6977dfe6 Yoshiaki Tamura
2885 c227f099 Anthony Liguori
/* Free the RAM block whose offset is exactly 'addr', unmapping or
   freeing its host memory according to how it was allocated.  Silently
   does nothing when no block matches.  */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                /* block->fd is only set by file_ram_alloc(); a zero fd
                   means the hugetlbfs path fell back to qemu_vmalloc. */
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                /* Allocated with mmap() in qemu_ram_alloc_from_ptr(). */
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }

}
2914 e9a1ab19 bellard
2915 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2916 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2917 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
2918 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
2919 5579c7f3 pbrook

2920 5579c7f3 pbrook
   It should not be used for general purpose DMA.
2921 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2922 5579c7f3 pbrook
 */
2923 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* Unsigned subtraction: also rejects addr < block->offset,
           since the difference then wraps above block->length.  */
        if (addr - block->offset < block->length) {
            /* Move the block to the head of the list so that repeated
               lookups of the same block stay fast (MRU ordering).  */
            QLIST_REMOVE(block, next);
            QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            return block->host + (addr - block->offset);
        }
    }

    /* An offset outside every registered block is a fatal caller bug. */
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
2940 dc828ca1 pbrook
2941 e890261f Marcelo Tosatti
/* Translate a host pointer back to a ram_addr_t offset.  On success
   the offset is stored in *ram_addr and 0 is returned; -1 is returned
   when the pointer does not fall inside any registered RAM block.  */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* BUG FIX: check the lower bound explicitly.  The original
           relied on a negative (signed) pointer difference converting
           to a huge unsigned value when compared with block->length,
           which is fragile and formally undefined for pointers below
           the block.  */
        if (host >= block->host &&
            (ram_addr_t)(host - block->host) < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }
    return -1;
}
2954 f471a17e Alex Williamson
2955 e890261f Marcelo Tosatti
/* Some of the softmmu routines need to translate from a host pointer
2956 e890261f Marcelo Tosatti
   (typically a TLB entry) back to a ram offset.  */
2957 e890261f Marcelo Tosatti
/* Like qemu_ram_addr_from_host(), but a pointer that belongs to no
   RAM block is treated as a fatal error.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t offset;

    if (qemu_ram_addr_from_host(ptr, &offset) != 0) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }

    return offset;
}
2967 5579c7f3 pbrook
2968 c227f099 Anthony Liguori
/* Byte read from unassigned memory: optionally logged, reported via
   do_unassigned_access() on SPARC/MicroBlaze, and reads as zero.  */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
2978 e18231a3 blueswir1
2979 c227f099 Anthony Liguori
/* 16-bit read handler for unassigned memory; see unassigned_mem_readb.  */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* args: addr, is_write=0, is_exec=0, is_asi=0, size=2 */
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2989 e18231a3 blueswir1
2990 c227f099 Anthony Liguori
/* 32-bit read handler for unassigned memory; see unassigned_mem_readb.  */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* args: addr, is_write=0, is_exec=0, is_asi=0, size=4 */
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
3000 33417e70 bellard
3001 c227f099 Anthony Liguori
/* Byte write handler for unassigned memory: the value is discarded;
   SPARC/MicroBlaze report the access to the CPU.  */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* args: addr, is_write=1, is_exec=0, is_asi=0, size=1 */
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
3010 e18231a3 blueswir1
3011 c227f099 Anthony Liguori
/* 16-bit write handler for unassigned memory; see unassigned_mem_writeb.  */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* args: addr, is_write=1, is_exec=0, is_asi=0, size=2 */
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
3020 e18231a3 blueswir1
3021 c227f099 Anthony Liguori
/* 32-bit write handler for unassigned memory; see unassigned_mem_writeb.  */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* args: addr, is_write=1, is_exec=0, is_asi=0, size=4 */
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
3030 33417e70 bellard
3031 d60efc6b Blue Swirl
/* Read dispatch table for unassigned memory: index 0 = byte,
   1 = word, 2 = long (dword).  */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
3036 33417e70 bellard
3037 d60efc6b Blue Swirl
/* Write dispatch table for unassigned memory: index 0 = byte,
   1 = word, 2 = long (dword).  */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3042 33417e70 bellard
3043 c227f099 Anthony Liguori
/* Byte write handler for the IO_MEM_NOTDIRTY slot: a RAM page whose
   dirty bits are clean.  Invalidates any TBs translated from the page,
   performs the actual store, marks the page dirty, and drops the
   notdirty TLB redirection once no translated code remains.  */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Translated code exists for this page: invalidate it, then
           re-read the flags since invalidation may have changed them. */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3062 9fa3e853 bellard
3063 c227f099 Anthony Liguori
/* 16-bit variant of notdirty_mem_writeb (2-byte invalidate + stw_p).  */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Invalidate translated code, then re-read the possibly
           updated flags. */
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3082 9fa3e853 bellard
3083 c227f099 Anthony Liguori
/* 32-bit variant of notdirty_mem_writeb (4-byte invalidate + stl_p).  */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Invalidate translated code, then re-read the possibly
           updated flags. */
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3102 9fa3e853 bellard
3103 d60efc6b Blue Swirl
/* Placeholder read table for slots that must never be read through
   (IO_MEM_ROM / IO_MEM_NOTDIRTY handle reads elsewhere).  */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
3108 9fa3e853 bellard
3109 d60efc6b Blue Swirl
/* Write dispatch table for the notdirty slot: byte/word/long.  */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3114 1ccde1cb bellard
3115 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.
 *
 * @offset is the page offset of the access (combined with the page
 * base recorded in env->mem_io_vaddr to recover the full virtual
 * address); @len_mask masks off the low bits covered by the access
 * width; @flags selects BP_MEM_READ or BP_MEM_WRITE.
 *
 * On a hit the current TB is invalidated and execution restarts so the
 * exception is delivered at the right instruction boundary; this
 * function then does not return (cpu_resume_from_signal longjmps).  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Hit if either endpoint of the access falls inside the
           watchpoint's masked range and the access kind matches. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Rewind CPU state to the faulting instruction and throw
                   away the TB that contained it. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Retranslate a single-instruction TB so the access
                       completes before the debug exception is raised. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3159 0f459d16 pbrook
3160 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    /* len_mask ~0x0: byte access, no low bits masked.  */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
3168 6658ffb8 pbrook
3169 c227f099 Anthony Liguori
/* 16-bit watchpoint-checking read; see watch_mem_readb.  */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3174 6658ffb8 pbrook
3175 c227f099 Anthony Liguori
/* 32-bit watchpoint-checking read; see watch_mem_readb.  */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3180 6658ffb8 pbrook
3181 c227f099 Anthony Liguori
/* Byte watchpoint-checking write; see watch_mem_readb.  */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
3187 6658ffb8 pbrook
3188 c227f099 Anthony Liguori
/* 16-bit watchpoint-checking write; see watch_mem_readb.  */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
3194 6658ffb8 pbrook
3195 c227f099 Anthony Liguori
/* 32-bit watchpoint-checking write; see watch_mem_readb.  */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
3201 6658ffb8 pbrook
3202 d60efc6b Blue Swirl
/* Read dispatch table for the watchpoint slot: byte/word/long.  */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3207 6658ffb8 pbrook
3208 d60efc6b Blue Swirl
/* Write dispatch table for the watchpoint slot: byte/word/long.  */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
3213 6658ffb8 pbrook
3214 f6405247 Richard Henderson
/* Read from a subpage container: look up the per-chunk io slot and
   region offset for @addr, then forward to that slot's read handler.
   @len is the width index (0 = byte, 1 = word, 2 = long).  */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}
3228 db7b5426 blueswir1
3229 c227f099 Anthony Liguori
/* Write counterpart of subpage_readlen: forward @value to the io slot
   registered for @addr's chunk.  @len is the width index (0/1/2).  */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3242 db7b5426 blueswir1
3243 c227f099 Anthony Liguori
/* Byte read wrapper: width index 0.  */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}
3247 db7b5426 blueswir1
3248 c227f099 Anthony Liguori
/* Byte write wrapper: width index 0.  */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}
3253 db7b5426 blueswir1
3254 c227f099 Anthony Liguori
/* 16-bit read wrapper: width index 1.  */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}
3258 db7b5426 blueswir1
3259 c227f099 Anthony Liguori
/* 16-bit write wrapper: width index 1.  */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}
3264 db7b5426 blueswir1
3265 c227f099 Anthony Liguori
/* 32-bit read wrapper: width index 2.  */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}
3269 db7b5426 blueswir1
3270 f6405247 Richard Henderson
/* 32-bit write wrapper: width index 2.  */
static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3275 db7b5426 blueswir1
3276 d60efc6b Blue Swirl
/* Read dispatch table registered for subpage containers.  */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3281 db7b5426 blueswir1
3282 d60efc6b Blue Swirl
/* Write dispatch table registered for subpage containers.  */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3287 db7b5426 blueswir1
3288 c227f099 Anthony Liguori
/* Map the byte range [@start, @end] (page-relative, inclusive) of a
   subpage container to io slot @memory with the given @region_offset.
   Returns 0 on success, -1 if the range exceeds the page.  */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Plain RAM cannot be dispatched through a subpage; treat it as
       unassigned instead. */
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    /* Reduce the encoded phys_offset to a bare io table index. */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3311 db7b5426 blueswir1
3312 f6405247 Richard Henderson
/* Allocate a subpage container for the page at @base, register it as
   an io region, store the resulting encoded phys_offset in *@phys, and
   initialize the whole page to @orig_memory/@region_offset.  Returns
   the new container (owned by the io table; never freed here).  */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3332 db7b5426 blueswir1
3333 88715657 aliguori
static int get_free_io_mem_idx(void)
3334 88715657 aliguori
{
3335 88715657 aliguori
    int i;
3336 88715657 aliguori
3337 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3338 88715657 aliguori
        if (!io_mem_used[i]) {
3339 88715657 aliguori
            io_mem_used[i] = 1;
3340 88715657 aliguori
            return i;
3341 88715657 aliguori
        }
3342 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3343 88715657 aliguori
    return -1;
3344 88715657 aliguori
}
3345 88715657 aliguori
3346 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* Allocate a fresh slot. */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller passed an encoded value; recover the raw table index. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    /* NULL entries fall back to the unassigned-memory handlers. */
    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    return (io_index << IO_MEM_SHIFT);
}
3382 61382a50 bellard
3383 d60efc6b Blue Swirl
/* Public entry point: allocate a new io zone (io_index 0 means
   "pick a free slot"); see cpu_register_io_memory_fixed.  */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
3389 1eed09cb Avi Kivity
3390 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3391 88715657 aliguori
{
3392 88715657 aliguori
    int i;
3393 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3394 88715657 aliguori
3395 88715657 aliguori
    for (i=0;i < 3; i++) {
3396 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3397 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3398 88715657 aliguori
    }
3399 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3400 88715657 aliguori
    io_mem_used[io_index] = 0;
3401 88715657 aliguori
}
3402 88715657 aliguori
3403 e9179ce1 Avi Kivity
/* Install the built-in io handlers (ROM, unassigned, notdirty) at
   their fixed slots, reserve the low table entries, and allocate the
   watchpoint slot dynamically.  Called once at startup.  */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* Slots 0..4 are reserved for the fixed built-in regions (RAM plus
       the entries registered above); mark them used so
       get_free_io_mem_idx() never hands them out.  */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
3416 e9179ce1 Avi Kivity
3417 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3418 e2eef170 pbrook
3419 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3420 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3421 a68fe89c Paul Brook
/* Debugger memory access for user-mode emulation: copy @len bytes
   between @buf and guest virtual memory at @addr, page by page,
   honoring page protection flags.  Returns 0 on success, -1 if any
   page is invalid or lacks the required permission.  */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3459 8df1cd07 bellard
3460 13eb76e0 bellard
#else
3461 c227f099 Anthony Liguori
/* Copy @len bytes between @buf and guest physical memory at @addr.
   Works page by page: RAM pages are accessed via memcpy through the
   host mapping (with code invalidation and dirty tracking on writes);
   MMIO pages are dispatched through the io_mem_* handler tables, using
   the widest naturally aligned access (4/2/1 bytes) per iteration.  */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write: dispatch through the io handler table. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3557 8df1cd07 bellard
3558 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
/* Like the write path of cpu_physical_memory_rw() but bypasses the
   read-only protection of ROM pages (and skips dirty tracking): writes
   go directly through the host mapping.  Pages that are neither RAM,
   ROM, nor ROMD device memory are silently skipped.  */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3596 d0ecd2aa bellard
3597 6d16c2f8 aliguori
/* Fallback buffer used by cpu_physical_memory_map() when the requested
   region is not directly-addressable RAM (e.g. MMIO).  There is exactly
   one bounce buffer; buffer == NULL means it is free. */
typedef struct {
    void *buffer;              /* host memory backing the mapping, or NULL */
    target_phys_addr_t addr;   /* guest physical address being bounced */
    target_phys_addr_t len;    /* length of the bounced region */
} BounceBuffer;

static BounceBuffer bounce;
3604 6d16c2f8 aliguori
3605 ba223c29 aliguori
/* Callers that could not obtain the (single) bounce buffer may register
   a callback; cpu_notify_map_clients() invokes it once the buffer is
   released, signalling that retrying the map is likely to succeed. */
typedef struct MapClient {
    void *opaque;                    /* argument handed back to callback */
    void (*callback)(void *opaque);  /* invoked on resource release */
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* List of clients waiting for map resources to become available. */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3613 ba223c29 aliguori
3614 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3615 ba223c29 aliguori
{
3616 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3617 ba223c29 aliguori
3618 ba223c29 aliguori
    client->opaque = opaque;
3619 ba223c29 aliguori
    client->callback = callback;
3620 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3621 ba223c29 aliguori
    return client;
3622 ba223c29 aliguori
}
3623 ba223c29 aliguori
3624 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3625 ba223c29 aliguori
{
3626 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3627 ba223c29 aliguori
3628 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3629 34d5e948 Isaku Yamahata
    qemu_free(client);
3630 ba223c29 aliguori
}
3631 ba223c29 aliguori
3632 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3633 ba223c29 aliguori
{
3634 ba223c29 aliguori
    MapClient *client;
3635 ba223c29 aliguori
3636 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3637 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3638 ba223c29 aliguori
        client->callback(client->opaque);
3639 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3640 ba223c29 aliguori
    }
3641 ba223c29 aliguori
}
3642 ba223c29 aliguori
3643 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;           /* host address of start of the mapping */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* clamp each step to the end of the current guest page */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not direct RAM: fall back to the single global bounce
               buffer.  It may only start a mapping (done == 0) and only
               when it is not already in use by another mapping. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* read mapping: pre-fill the buffer via the I/O path */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* host addresses stopped being contiguous: return the
               prefix mapped so far */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3704 6d16c2f8 aliguori
3705 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but written pages
           need TB invalidation and dirty-bit updates. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer case: flush written data back through the I/O path,
       then release the buffer and wake any waiting map clients. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
3740 d0ecd2aa bellard
3741 8df1cd07 bellard
/* Load a 32-bit value from guest physical memory, dispatching to either
   a device read handler (I/O) or a direct RAM access.
   warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    /* look up the page descriptor; unmapped pages read as unassigned */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
3772 8df1cd07 bellard
3773 84b7b8e7 bellard
/* Load a 64-bit value from guest physical memory (I/O or RAM).
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* the 64-bit access is split into two 32-bit device reads,
           combined in guest byte order */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
3810 84b7b8e7 bellard
3811 aab33094 bellard
/* XXX: optimize */
3812 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
3813 aab33094 bellard
{
3814 aab33094 bellard
    uint8_t val;
3815 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3816 aab33094 bellard
    return val;
3817 aab33094 bellard
}
3818 aab33094 bellard
3819 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
3820 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
3821 aab33094 bellard
{
3822 733f0b02 Michael S. Tsirkin
    int io_index;
3823 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
3824 733f0b02 Michael S. Tsirkin
    uint64_t val;
3825 733f0b02 Michael S. Tsirkin
    unsigned long pd;
3826 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
3827 733f0b02 Michael S. Tsirkin
3828 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3829 733f0b02 Michael S. Tsirkin
    if (!p) {
3830 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
3831 733f0b02 Michael S. Tsirkin
    } else {
3832 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
3833 733f0b02 Michael S. Tsirkin
    }
3834 733f0b02 Michael S. Tsirkin
3835 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3836 733f0b02 Michael S. Tsirkin
        !(pd & IO_MEM_ROMD)) {
3837 733f0b02 Michael S. Tsirkin
        /* I/O case */
3838 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3839 733f0b02 Michael S. Tsirkin
        if (p)
3840 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3841 733f0b02 Michael S. Tsirkin
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3842 733f0b02 Michael S. Tsirkin
    } else {
3843 733f0b02 Michael S. Tsirkin
        /* RAM case */
3844 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3845 733f0b02 Michael S. Tsirkin
            (addr & ~TARGET_PAGE_MASK);
3846 733f0b02 Michael S. Tsirkin
        val = lduw_p(ptr);
3847 733f0b02 Michael S. Tsirkin
    }
3848 733f0b02 Michael S. Tsirkin
    return val;
3849 aab33094 bellard
}
3850 aab33094 bellard
3851 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: forward to the device write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* during migration, dirty tracking must still see the write
           even though normal dirty marking is skipped */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
3889 8df1cd07 bellard
3890 c227f099 Anthony Liguori
/* 64-bit variant of stl_phys_notdirty: store without marking the page
   dirty or invalidating translated code.  addr must be aligned. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: split into two 32-bit device writes in guest order */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: no dirty marking, by design */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3921 bc98a7ef j_mayer
3922 8df1cd07 bellard
/* Store a 32-bit value to guest physical memory (I/O or RAM), with full
   dirty tracking and code invalidation on the RAM path.
   warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
3957 8df1cd07 bellard
3958 aab33094 bellard
/* XXX: optimize */
3959 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
3960 aab33094 bellard
{
3961 aab33094 bellard
    uint8_t v = val;
3962 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3963 aab33094 bellard
}
3964 aab33094 bellard
3965 733f0b02 Michael S. Tsirkin
/* Store a 16-bit value to guest physical memory (I/O or RAM), with full
   dirty tracking and code invalidation on the RAM path.
   warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4000 aab33094 bellard
4001 aab33094 bellard
/* XXX: optimize */
4002 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
4003 aab33094 bellard
{
4004 aab33094 bellard
    val = tswap64(val);
4005 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4006 aab33094 bellard
}
4007 aab33094 bellard
4008 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
4009 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4010 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
4011 13eb76e0 bellard
{
4012 13eb76e0 bellard
    int l;
4013 c227f099 Anthony Liguori
    target_phys_addr_t phys_addr;
4014 9b3c35e0 j_mayer
    target_ulong page;
4015 13eb76e0 bellard
4016 13eb76e0 bellard
    while (len > 0) {
4017 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
4018 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
4019 13eb76e0 bellard
        /* if no physical page mapped, return an error */
4020 13eb76e0 bellard
        if (phys_addr == -1)
4021 13eb76e0 bellard
            return -1;
4022 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
4023 13eb76e0 bellard
        if (l > len)
4024 13eb76e0 bellard
            l = len;
4025 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
4026 5e2972fd aliguori
        if (is_write)
4027 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
4028 5e2972fd aliguori
        else
4029 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4030 13eb76e0 bellard
        len -= l;
4031 13eb76e0 bellard
        buf += l;
4032 13eb76e0 bellard
        addr += l;
4033 13eb76e0 bellard
    }
4034 13eb76e0 bellard
    return 0;
4035 13eb76e0 bellard
}
4036 a68fe89c Paul Brook
#endif
4037 13eb76e0 bellard
4038 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* locate the TB containing the faulting host return address */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* retranslate the block with an instruction budget of n and the
       last-instruction-does-I/O flag set */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
4096 2e70f6ef pbrook
4097 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
4098 b3755a91 Paul Brook
4099 e3db7226 bellard
/* Print translation-buffer statistics (TB counts, sizes, jump-chaining
   and flush counters) to f via the caller-supplied cpu_fprintf. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* first pass: accumulate per-TB statistics */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n", 
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
4151 e3db7226 bellard
4152 61382a50 bellard
/* Instantiate the code-access ("_cmmu") soft-MMU load helpers: the
   template is included once per access size (SHIFT 0..3 = 1,2,4,8
   bytes).  GETPC() is NULL here — NOTE(review): presumably code fetches
   need no host retaddr for fault restart; confirm against
   softmmu_template.h. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif