Statistics
| Branch: | Revision:

root / exec.c @ 3329f07b

History | View | Annotate | Download (122 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
#include <stdlib.h>
27 54936004 bellard
#include <stdio.h>
28 54936004 bellard
#include <stdarg.h>
29 54936004 bellard
#include <string.h>
30 54936004 bellard
#include <errno.h>
31 54936004 bellard
#include <unistd.h>
32 54936004 bellard
#include <inttypes.h>
33 54936004 bellard
34 6180a181 bellard
#include "cpu.h"
35 6180a181 bellard
#include "exec-all.h"
36 ca10f867 aurel32
#include "qemu-common.h"
37 b67d9a52 bellard
#include "tcg.h"
38 b3c7724c pbrook
#include "hw/hw.h"
39 cc9e98cb Alex Williamson
#include "hw/qdev.h"
40 74576198 aliguori
#include "osdep.h"
41 7ba1e619 aliguori
#include "kvm.h"
42 29e922b6 Blue Swirl
#include "qemu-timer.h"
43 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
44 53a5960a pbrook
#include <qemu.h>
45 fd052bf6 Riku Voipio
#include <signal.h>
46 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
47 f01576f1 Juergen Lock
#include <sys/param.h>
48 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
49 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
50 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
51 f01576f1 Juergen Lock
#include <sys/time.h>
52 f01576f1 Juergen Lock
#include <sys/proc.h>
53 f01576f1 Juergen Lock
#include <machine/profile.h>
54 f01576f1 Juergen Lock
#define _KERNEL
55 f01576f1 Juergen Lock
#include <sys/user.h>
56 f01576f1 Juergen Lock
#undef _KERNEL
57 f01576f1 Juergen Lock
#undef sigqueue
58 f01576f1 Juergen Lock
#include <libutil.h>
59 f01576f1 Juergen Lock
#endif
60 f01576f1 Juergen Lock
#endif
61 53a5960a pbrook
#endif
62 54936004 bellard
63 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
64 66e85a21 bellard
//#define DEBUG_FLUSH
65 9fa3e853 bellard
//#define DEBUG_TLB
66 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
67 fd6ce8f6 bellard
68 fd6ce8f6 bellard
/* make various TB consistency checks */
69 5fafdf24 ths
//#define DEBUG_TB_CHECK
70 5fafdf24 ths
//#define DEBUG_TLB_CHECK
71 fd6ce8f6 bellard
72 1196be37 ths
//#define DEBUG_IOPORT
73 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
74 1196be37 ths
75 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
76 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
77 99773bd4 pbrook
#undef DEBUG_TB_CHECK
78 99773bd4 pbrook
#endif
79 99773bd4 pbrook
80 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
81 9fa3e853 bellard
82 bdaf78e0 blueswir1
static TranslationBlock *tbs;
83 24ab68ac Stefan Weil
static int code_gen_max_blocks;
84 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 bdaf78e0 blueswir1
static int nb_tbs;
86 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
87 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88 fd6ce8f6 bellard
89 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
90 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
92 d03d860b blueswir1
 section close to code segment. */
93 d03d860b blueswir1
#define code_gen_section                                \
94 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
95 d03d860b blueswir1
    __attribute__((aligned (32)))
96 f8e2af11 Stefan Weil
#elif defined(_WIN32)
97 f8e2af11 Stefan Weil
/* Maximum alignment for Win32 is 16. */
98 f8e2af11 Stefan Weil
#define code_gen_section                                \
99 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
100 d03d860b blueswir1
#else
101 d03d860b blueswir1
#define code_gen_section                                \
102 d03d860b blueswir1
    __attribute__((aligned (32)))
103 d03d860b blueswir1
#endif
104 d03d860b blueswir1
105 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
106 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
107 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
108 26a5f13b bellard
/* threshold to flush the translated code buffer */
109 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
110 24ab68ac Stefan Weil
static uint8_t *code_gen_ptr;
111 fd6ce8f6 bellard
112 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
113 9fa3e853 bellard
int phys_ram_fd;
114 74576198 aliguori
static int in_migration;
115 94a6b54f pbrook
116 f471a17e Alex Williamson
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
117 e2eef170 pbrook
#endif
118 9fa3e853 bellard
119 6a00d601 bellard
CPUState *first_cpu;
120 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
121 6a00d601 bellard
   cpu_exec() */
122 5fafdf24 ths
CPUState *cpu_single_env;
123 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
124 bf20dc07 ths
   1 = Precise instruction counting.
125 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
126 2e70f6ef pbrook
int use_icount = 0;
127 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
128 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
129 2e70f6ef pbrook
int64_t qemu_icount;
130 6a00d601 bellard
131 54936004 bellard
typedef struct PageDesc {
132 92e873b9 bellard
    /* list of TBs intersecting this ram page */
133 fd6ce8f6 bellard
    TranslationBlock *first_tb;
134 9fa3e853 bellard
    /* in order to optimize self modifying code, we count the number
135 9fa3e853 bellard
       of lookups we do to a given page to use a bitmap */
136 9fa3e853 bellard
    unsigned int code_write_count;
137 9fa3e853 bellard
    uint8_t *code_bitmap;
138 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
139 9fa3e853 bellard
    unsigned long flags;
140 9fa3e853 bellard
#endif
141 54936004 bellard
} PageDesc;
142 54936004 bellard
143 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
144 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
145 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
146 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
147 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
148 41c1b1c9 Paul Brook
#else
149 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
150 41c1b1c9 Paul Brook
#endif
151 bedb69ea j_mayer
#else
152 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
153 bedb69ea j_mayer
#endif
154 54936004 bellard
155 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
156 5cd2c5b6 Richard Henderson
#define L2_BITS 10
157 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
158 54936004 bellard
159 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
160 5cd2c5b6 Richard Henderson
#define P_L1_BITS_REM \
161 5cd2c5b6 Richard Henderson
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
162 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
163 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
164 5cd2c5b6 Richard Henderson
165 5cd2c5b6 Richard Henderson
/* Size of the L1 page table.  Avoid silly small sizes.  */
166 5cd2c5b6 Richard Henderson
#if P_L1_BITS_REM < 4
167 5cd2c5b6 Richard Henderson
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
168 5cd2c5b6 Richard Henderson
#else
169 5cd2c5b6 Richard Henderson
#define P_L1_BITS  P_L1_BITS_REM
170 5cd2c5b6 Richard Henderson
#endif
171 5cd2c5b6 Richard Henderson
172 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
173 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
174 5cd2c5b6 Richard Henderson
#else
175 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
176 5cd2c5b6 Richard Henderson
#endif
177 5cd2c5b6 Richard Henderson
178 5cd2c5b6 Richard Henderson
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
179 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
180 5cd2c5b6 Richard Henderson
181 5cd2c5b6 Richard Henderson
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
182 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
183 5cd2c5b6 Richard Henderson
184 83fb7adf bellard
unsigned long qemu_real_host_page_size;
185 83fb7adf bellard
unsigned long qemu_host_page_bits;
186 83fb7adf bellard
unsigned long qemu_host_page_size;
187 83fb7adf bellard
unsigned long qemu_host_page_mask;
188 54936004 bellard
189 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
190 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
191 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
192 54936004 bellard
193 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
194 41c1b1c9 Paul Brook
typedef struct PhysPageDesc {
195 41c1b1c9 Paul Brook
    /* offset in host memory of the page + io_index in the low bits */
196 41c1b1c9 Paul Brook
    ram_addr_t phys_offset;
197 41c1b1c9 Paul Brook
    ram_addr_t region_offset;
198 41c1b1c9 Paul Brook
} PhysPageDesc;
199 41c1b1c9 Paul Brook
200 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
201 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
202 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
203 6d9a1304 Paul Brook
204 e2eef170 pbrook
static void io_mem_init(void);
205 e2eef170 pbrook
206 33417e70 bellard
/* io memory support */
207 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
208 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
209 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
210 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
211 6658ffb8 pbrook
static int io_mem_watch;
212 6658ffb8 pbrook
#endif
213 33417e70 bellard
214 34865134 bellard
/* log support */
215 1e8b27ca Juha Riihimäki
#ifdef WIN32
216 1e8b27ca Juha Riihimรคki
static const char *logfilename = "qemu.log";
217 1e8b27ca Juha Riihimรคki
#else
218 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
219 1e8b27ca Juha Riihimäki
#endif
220 34865134 bellard
FILE *logfile;
221 34865134 bellard
int loglevel;
222 e735b91c pbrook
static int log_append = 0;
223 34865134 bellard
224 e3db7226 bellard
/* statistics */
225 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
226 e3db7226 bellard
static int tlb_flush_count;
227 b3755a91 Paul Brook
#endif
228 e3db7226 bellard
static int tb_flush_count;
229 e3db7226 bellard
static int tb_phys_invalidate_count;
230 e3db7226 bellard
231 7cb69cae bellard
#ifdef _WIN32
232 7cb69cae bellard
static void map_exec(void *addr, long size)
233 7cb69cae bellard
{
234 7cb69cae bellard
    DWORD old_protect;
235 7cb69cae bellard
    VirtualProtect(addr, size,
236 7cb69cae bellard
                   PAGE_EXECUTE_READWRITE, &old_protect);
237 7cb69cae bellard
    
238 7cb69cae bellard
}
239 7cb69cae bellard
#else
240 7cb69cae bellard
static void map_exec(void *addr, long size)
{
    /* Round [addr, addr + size) out to whole host pages and mark the
       span readable, writable and executable so that generated code
       can be run from it.  */
    unsigned long page_size = getpagesize();
    unsigned long lo = (unsigned long)addr & ~(page_size - 1);
    unsigned long hi = (unsigned long)addr + size;

    hi += page_size - 1;
    hi &= ~(page_size - 1);

    mprotect((void *)lo, hi - lo, PROT_READ | PROT_WRITE | PROT_EXEC);
}
255 7cb69cae bellard
#endif
256 7cb69cae bellard
257 b346ff46 bellard
static void page_init(void)
258 54936004 bellard
{
259 83fb7adf bellard
    /* NOTE: we can always suppose that qemu_host_page_size >=
260 54936004 bellard
       TARGET_PAGE_SIZE */
261 c2b48b69 aliguori
#ifdef _WIN32
262 c2b48b69 aliguori
    {
263 c2b48b69 aliguori
        SYSTEM_INFO system_info;
264 c2b48b69 aliguori
265 c2b48b69 aliguori
        GetSystemInfo(&system_info);
266 c2b48b69 aliguori
        qemu_real_host_page_size = system_info.dwPageSize;
267 c2b48b69 aliguori
    }
268 c2b48b69 aliguori
#else
269 c2b48b69 aliguori
    qemu_real_host_page_size = getpagesize();
270 c2b48b69 aliguori
#endif
271 83fb7adf bellard
    if (qemu_host_page_size == 0)
272 83fb7adf bellard
        qemu_host_page_size = qemu_real_host_page_size;
273 83fb7adf bellard
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
274 83fb7adf bellard
        qemu_host_page_size = TARGET_PAGE_SIZE;
275 83fb7adf bellard
    qemu_host_page_bits = 0;
276 83fb7adf bellard
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
277 83fb7adf bellard
        qemu_host_page_bits++;
278 83fb7adf bellard
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
279 50a9569b balrog
280 2e9a5713 Paul Brook
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
281 50a9569b balrog
    {
282 f01576f1 Juergen Lock
#ifdef HAVE_KINFO_GETVMMAP
283 f01576f1 Juergen Lock
        struct kinfo_vmentry *freep;
284 f01576f1 Juergen Lock
        int i, cnt;
285 f01576f1 Juergen Lock
286 f01576f1 Juergen Lock
        freep = kinfo_getvmmap(getpid(), &cnt);
287 f01576f1 Juergen Lock
        if (freep) {
288 f01576f1 Juergen Lock
            mmap_lock();
289 f01576f1 Juergen Lock
            for (i = 0; i < cnt; i++) {
290 f01576f1 Juergen Lock
                unsigned long startaddr, endaddr;
291 f01576f1 Juergen Lock
292 f01576f1 Juergen Lock
                startaddr = freep[i].kve_start;
293 f01576f1 Juergen Lock
                endaddr = freep[i].kve_end;
294 f01576f1 Juergen Lock
                if (h2g_valid(startaddr)) {
295 f01576f1 Juergen Lock
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
296 f01576f1 Juergen Lock
297 f01576f1 Juergen Lock
                    if (h2g_valid(endaddr)) {
298 f01576f1 Juergen Lock
                        endaddr = h2g(endaddr);
299 fd436907 Aurelien Jarno
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
300 f01576f1 Juergen Lock
                    } else {
301 f01576f1 Juergen Lock
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
302 f01576f1 Juergen Lock
                        endaddr = ~0ul;
303 fd436907 Aurelien Jarno
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
304 f01576f1 Juergen Lock
#endif
305 f01576f1 Juergen Lock
                    }
306 f01576f1 Juergen Lock
                }
307 f01576f1 Juergen Lock
            }
308 f01576f1 Juergen Lock
            free(freep);
309 f01576f1 Juergen Lock
            mmap_unlock();
310 f01576f1 Juergen Lock
        }
311 f01576f1 Juergen Lock
#else
312 50a9569b balrog
        FILE *f;
313 50a9569b balrog
314 0776590d pbrook
        last_brk = (unsigned long)sbrk(0);
315 5cd2c5b6 Richard Henderson
316 fd436907 Aurelien Jarno
        f = fopen("/compat/linux/proc/self/maps", "r");
317 50a9569b balrog
        if (f) {
318 5cd2c5b6 Richard Henderson
            mmap_lock();
319 5cd2c5b6 Richard Henderson
320 50a9569b balrog
            do {
321 5cd2c5b6 Richard Henderson
                unsigned long startaddr, endaddr;
322 5cd2c5b6 Richard Henderson
                int n;
323 5cd2c5b6 Richard Henderson
324 5cd2c5b6 Richard Henderson
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
325 5cd2c5b6 Richard Henderson
326 5cd2c5b6 Richard Henderson
                if (n == 2 && h2g_valid(startaddr)) {
327 5cd2c5b6 Richard Henderson
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
328 5cd2c5b6 Richard Henderson
329 5cd2c5b6 Richard Henderson
                    if (h2g_valid(endaddr)) {
330 5cd2c5b6 Richard Henderson
                        endaddr = h2g(endaddr);
331 5cd2c5b6 Richard Henderson
                    } else {
332 5cd2c5b6 Richard Henderson
                        endaddr = ~0ul;
333 5cd2c5b6 Richard Henderson
                    }
334 5cd2c5b6 Richard Henderson
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
335 50a9569b balrog
                }
336 50a9569b balrog
            } while (!feof(f));
337 5cd2c5b6 Richard Henderson
338 50a9569b balrog
            fclose(f);
339 5cd2c5b6 Richard Henderson
            mmap_unlock();
340 50a9569b balrog
        }
341 f01576f1 Juergen Lock
#endif
342 50a9569b balrog
    }
343 50a9569b balrog
#endif
344 54936004 bellard
}
345 54936004 bellard
346 41c1b1c9 Paul Brook
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    /* Walk the multi-level l1_map for the PageDesc of the given page
       index.  When 'alloc' is non-zero, missing intermediate tables
       and the leaf level are created on demand; otherwise NULL is
       returned as soon as a level is absent.  */
    PageDesc *pd;
    void **lp;
    int level;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (level = V_L1_SHIFT / L2_BITS - 1; level > 0; level--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (level * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Leaf level: an array of PageDesc.  */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
395 54936004 bellard
396 41c1b1c9 Paul Brook
static inline PageDesc *page_find(tb_page_addr_t index)
{
    /* Lookup-only variant: never allocates missing table levels.  */
    return page_find_alloc(index, 0);
}
400 fd6ce8f6 bellard
401 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
402 c227f099 Anthony Liguori
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    /* Walk the multi-level l1_phys_map for the PhysPageDesc of the
       given physical page index.  When 'alloc' is non-zero, missing
       levels are created (and a fresh leaf is initialized to
       IO_MEM_UNASSIGNED); otherwise NULL is returned on the first
       absent level.  */
    PhysPageDesc *pd;
    void **lp;
    int level;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (level = P_L1_SHIFT / L2_BITS - 1; level > 0; level--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (level * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        /* A brand-new leaf: every page starts out unassigned, with its
           region offset equal to its own physical address.  */
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
441 92e873b9 bellard
442 c227f099 Anthony Liguori
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    /* Lookup-only variant: never allocates missing table levels.  */
    return phys_page_find_alloc(index, 0);
}
446 92e873b9 bellard
447 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
448 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
449 3a7d929e bellard
                                    target_ulong vaddr);
450 c8a706fe pbrook
#define mmap_lock() do { } while(0)
451 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
452 9fa3e853 bellard
#endif
453 fd6ce8f6 bellard
454 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
455 4369415f bellard
456 4369415f bellard
#if defined(CONFIG_USER_ONLY)
457 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
458 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
459 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
460 4369415f bellard
#endif
461 4369415f bellard
462 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
463 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
464 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
465 4369415f bellard
#endif
466 4369415f bellard
467 8fcd3692 blueswir1
static void code_gen_alloc(unsigned long tb_size)
468 26a5f13b bellard
{
469 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
470 4369415f bellard
    code_gen_buffer = static_code_gen_buffer;
471 4369415f bellard
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
472 4369415f bellard
    map_exec(code_gen_buffer, code_gen_buffer_size);
473 4369415f bellard
#else
474 26a5f13b bellard
    code_gen_buffer_size = tb_size;
475 26a5f13b bellard
    if (code_gen_buffer_size == 0) {
476 4369415f bellard
#if defined(CONFIG_USER_ONLY)
477 4369415f bellard
        /* in user mode, phys_ram_size is not meaningful */
478 4369415f bellard
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
479 4369415f bellard
#else
480 ccbb4d44 Stuart Brady
        /* XXX: needs adjustments */
481 94a6b54f pbrook
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
482 4369415f bellard
#endif
483 26a5f13b bellard
    }
484 26a5f13b bellard
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
485 26a5f13b bellard
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
486 26a5f13b bellard
    /* The code gen buffer location may have constraints depending on
487 26a5f13b bellard
       the host cpu and OS */
488 26a5f13b bellard
#if defined(__linux__) 
489 26a5f13b bellard
    {
490 26a5f13b bellard
        int flags;
491 141ac468 blueswir1
        void *start = NULL;
492 141ac468 blueswir1
493 26a5f13b bellard
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
494 26a5f13b bellard
#if defined(__x86_64__)
495 26a5f13b bellard
        flags |= MAP_32BIT;
496 26a5f13b bellard
        /* Cannot map more than that */
497 26a5f13b bellard
        if (code_gen_buffer_size > (800 * 1024 * 1024))
498 26a5f13b bellard
            code_gen_buffer_size = (800 * 1024 * 1024);
499 141ac468 blueswir1
#elif defined(__sparc_v9__)
500 141ac468 blueswir1
        // Map the buffer below 2G, so we can use direct calls and branches
501 141ac468 blueswir1
        flags |= MAP_FIXED;
502 141ac468 blueswir1
        start = (void *) 0x60000000UL;
503 141ac468 blueswir1
        if (code_gen_buffer_size > (512 * 1024 * 1024))
504 141ac468 blueswir1
            code_gen_buffer_size = (512 * 1024 * 1024);
505 1cb0661e balrog
#elif defined(__arm__)
506 63d41246 balrog
        /* Map the buffer below 32M, so we can use direct calls and branches */
507 1cb0661e balrog
        flags |= MAP_FIXED;
508 1cb0661e balrog
        start = (void *) 0x01000000UL;
509 1cb0661e balrog
        if (code_gen_buffer_size > 16 * 1024 * 1024)
510 1cb0661e balrog
            code_gen_buffer_size = 16 * 1024 * 1024;
511 eba0b893 Richard Henderson
#elif defined(__s390x__)
512 eba0b893 Richard Henderson
        /* Map the buffer so that we can use direct calls and branches.  */
513 eba0b893 Richard Henderson
        /* We have a +- 4GB range on the branches; leave some slop.  */
514 eba0b893 Richard Henderson
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
515 eba0b893 Richard Henderson
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
516 eba0b893 Richard Henderson
        }
517 eba0b893 Richard Henderson
        start = (void *)0x90000000UL;
518 26a5f13b bellard
#endif
519 141ac468 blueswir1
        code_gen_buffer = mmap(start, code_gen_buffer_size,
520 141ac468 blueswir1
                               PROT_WRITE | PROT_READ | PROT_EXEC,
521 26a5f13b bellard
                               flags, -1, 0);
522 26a5f13b bellard
        if (code_gen_buffer == MAP_FAILED) {
523 26a5f13b bellard
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
524 26a5f13b bellard
            exit(1);
525 26a5f13b bellard
        }
526 26a5f13b bellard
    }
527 a167ba50 Aurelien Jarno
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
528 06e67a82 aliguori
    {
529 06e67a82 aliguori
        int flags;
530 06e67a82 aliguori
        void *addr = NULL;
531 06e67a82 aliguori
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
532 06e67a82 aliguori
#if defined(__x86_64__)
533 06e67a82 aliguori
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
534 06e67a82 aliguori
         * 0x40000000 is free */
535 06e67a82 aliguori
        flags |= MAP_FIXED;
536 06e67a82 aliguori
        addr = (void *)0x40000000;
537 06e67a82 aliguori
        /* Cannot map more than that */
538 06e67a82 aliguori
        if (code_gen_buffer_size > (800 * 1024 * 1024))
539 06e67a82 aliguori
            code_gen_buffer_size = (800 * 1024 * 1024);
540 06e67a82 aliguori
#endif
541 06e67a82 aliguori
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
542 06e67a82 aliguori
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
543 06e67a82 aliguori
                               flags, -1, 0);
544 06e67a82 aliguori
        if (code_gen_buffer == MAP_FAILED) {
545 06e67a82 aliguori
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
546 06e67a82 aliguori
            exit(1);
547 06e67a82 aliguori
        }
548 06e67a82 aliguori
    }
549 26a5f13b bellard
#else
550 26a5f13b bellard
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
551 26a5f13b bellard
    map_exec(code_gen_buffer, code_gen_buffer_size);
552 26a5f13b bellard
#endif
553 4369415f bellard
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
554 26a5f13b bellard
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
555 26a5f13b bellard
    code_gen_buffer_max_size = code_gen_buffer_size - 
556 239fda31 Aurelien Jarno
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
557 26a5f13b bellard
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
558 26a5f13b bellard
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
559 26a5f13b bellard
}
560 26a5f13b bellard
561 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
562 26a5f13b bellard
   (in bytes) allocated to the translation buffer. Zero means default
563 26a5f13b bellard
   size. */
564 26a5f13b bellard
void cpu_exec_init_all(unsigned long tb_size)
565 26a5f13b bellard
{
566 26a5f13b bellard
    cpu_gen_init();
567 26a5f13b bellard
    code_gen_alloc(tb_size);
568 26a5f13b bellard
    code_gen_ptr = code_gen_buffer;
569 4369415f bellard
    page_init();
570 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
571 26a5f13b bellard
    io_mem_init();
572 e2eef170 pbrook
#endif
573 9002ec79 Richard Henderson
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
574 9002ec79 Richard Henderson
    /* There's no guest base to take into account, so go ahead and
575 9002ec79 Richard Henderson
       initialize the prologue now.  */
576 9002ec79 Richard Henderson
    tcg_prologue_init(&tcg_ctx);
577 9002ec79 Richard Henderson
#endif
578 26a5f13b bellard
}
579 26a5f13b bellard
580 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
581 9656f324 pbrook
582 e59fb374 Juan Quintela
static int cpu_common_post_load(void *opaque, int version_id)
583 e7f4eff7 Juan Quintela
{
584 e7f4eff7 Juan Quintela
    CPUState *env = opaque;
585 9656f324 pbrook
586 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
587 3098dba0 aurel32
       version_id is increased. */
588 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
589 9656f324 pbrook
    tlb_flush(env, 1);
590 9656f324 pbrook
591 9656f324 pbrook
    return 0;
592 9656f324 pbrook
}
593 e7f4eff7 Juan Quintela
594 e7f4eff7 Juan Quintela
/* Migration description of the target-independent part of CPUState.
   Per-target state is saved separately via cpu_save/cpu_load (see
   register_savevm in cpu_exec_init). */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
606 9656f324 pbrook
#endif
607 9656f324 pbrook
608 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
609 950f1472 Glauber Costa
{
610 950f1472 Glauber Costa
    CPUState *env = first_cpu;
611 950f1472 Glauber Costa
612 950f1472 Glauber Costa
    while (env) {
613 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
614 950f1472 Glauber Costa
            break;
615 950f1472 Glauber Costa
        env = env->next_cpu;
616 950f1472 Glauber Costa
    }
617 950f1472 Glauber Costa
618 950f1472 Glauber Costa
    return env;
619 950f1472 Glauber Costa
}
620 950f1472 Glauber Costa
621 6a00d601 bellard
/* Register a freshly created CPU: append it to the global CPU list,
   assign it the next free cpu_index, and hook it into savevm/vmstate
   when snapshot support is compiled in. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* user mode: the CPU list may be walked concurrently by other
       threads, so take the list lock while we modify it */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    /* walk to the end of the list, counting CPUs to derive the index */
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    /* link the new CPU in only after it is fully initialized */
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* common state goes through vmstate; per-target state through the
       legacy cpu_save/cpu_load pair */
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
650 fd6ce8f6 bellard
651 9fa3e853 bellard
/* Discard the SMC code bitmap of a page and reset its write counter,
   so the bitmap will be rebuilt lazily on the next threshold hit. */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    p->code_write_count = 0;
    if (!p->code_bitmap) {
        return;
    }
    qemu_free(p->code_bitmap);
    p->code_bitmap = NULL;
}
659 9fa3e853 bellard
660 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
661 5cd2c5b6 Richard Henderson
662 5cd2c5b6 Richard Henderson
static void page_flush_tb_1 (int level, void **lp)
663 fd6ce8f6 bellard
{
664 5cd2c5b6 Richard Henderson
    int i;
665 fd6ce8f6 bellard
666 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
667 5cd2c5b6 Richard Henderson
        return;
668 5cd2c5b6 Richard Henderson
    }
669 5cd2c5b6 Richard Henderson
    if (level == 0) {
670 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
671 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
672 5cd2c5b6 Richard Henderson
            pd[i].first_tb = NULL;
673 5cd2c5b6 Richard Henderson
            invalidate_page_bitmap(pd + i);
674 fd6ce8f6 bellard
        }
675 5cd2c5b6 Richard Henderson
    } else {
676 5cd2c5b6 Richard Henderson
        void **pp = *lp;
677 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
678 5cd2c5b6 Richard Henderson
            page_flush_tb_1 (level - 1, pp + i);
679 5cd2c5b6 Richard Henderson
        }
680 5cd2c5b6 Richard Henderson
    }
681 5cd2c5b6 Richard Henderson
}
682 5cd2c5b6 Richard Henderson
683 5cd2c5b6 Richard Henderson
static void page_flush_tb(void)
684 5cd2c5b6 Richard Henderson
{
685 5cd2c5b6 Richard Henderson
    int i;
686 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
687 5cd2c5b6 Richard Henderson
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
688 fd6ce8f6 bellard
    }
689 fd6ce8f6 bellard
}
690 fd6ce8f6 bellard
691 fd6ce8f6 bellard
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* the generator must never have written past the buffer end */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    /* forget every translation block at once */
    nb_tbs = 0;

    /* clear each CPU's direct TB lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* empty the physical-PC hash table and the per-page TB lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* code generation restarts from the beginning of the buffer */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
719 fd6ce8f6 bellard
720 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
721 fd6ce8f6 bellard
722 bc98a7ef j_mayer
/* Debug helper: after invalidating 'address''s page, report any TB
   still present in the physical hash that overlaps that page. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* overlap test: TB range intersects [address, address+page) */
            if (address + TARGET_PAGE_SIZE > tb->pc &&
                address < tb->pc + tb->size) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
738 fd6ce8f6 bellard
739 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
740 fd6ce8f6 bellard
static void tb_page_check(void)
741 fd6ce8f6 bellard
{
742 fd6ce8f6 bellard
    TranslationBlock *tb;
743 fd6ce8f6 bellard
    int i, flags1, flags2;
744 3b46e624 ths
745 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
746 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
747 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
748 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
749 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
750 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
751 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
752 fd6ce8f6 bellard
            }
753 fd6ce8f6 bellard
        }
754 fd6ce8f6 bellard
    }
755 fd6ce8f6 bellard
}
756 fd6ce8f6 bellard
757 fd6ce8f6 bellard
#endif
758 fd6ce8f6 bellard
759 fd6ce8f6 bellard
/* invalidate one TB */
/* Unlink 'tb' from the singly linked list rooted at *ptb.
   'next_offset' is the byte offset of the relevant 'next' pointer
   inside TranslationBlock, so the same helper serves any of its
   intrusive lists. The TB is assumed to be on the list: the loop
   has no NULL check and would crash otherwise. */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            /* splice the node out by copying its 'next' field */
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        /* advance to the address of the current node's 'next' field */
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
773 fd6ce8f6 bellard
774 9fa3e853 bellard
/* Unlink 'tb' from a page's TB list. List pointers carry the page
   slot number (0 or 1) of the *pointed-to* TB in their low 2 bits,
   which selects the page_next[] entry that continues the chain.
   'tb' is assumed to be present on the list. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        /* split the tagged pointer into tag and real TB address */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
790 9fa3e853 bellard
791 d4e8164f bellard
/* Remove jump slot 'n' of 'tb' from the circular list of TBs that
   jump to the same destination. The low 2 bits of each list pointer
   name the jump slot of the pointed-to TB that continues the chain;
   tag value 2 marks the destination TB's jmp_first list head. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the list head of the destination TB */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
818 d4e8164f bellard
819 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
820 d4e8164f bellard
   another TB */
821 d4e8164f bellard
static inline void tb_reset_jump(TranslationBlock *tb, int n)
822 d4e8164f bellard
{
823 d4e8164f bellard
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
824 d4e8164f bellard
}
825 d4e8164f bellard
826 41c1b1c9 Paul Brook
/* Remove one TB from every data structure that references it: the
   physical-PC hash table, the per-page TB lists, each CPU's lookup
   cache, and the jump chains in both directions. 'page_addr' names a
   page being invalidated wholesale (-1 for none): its own list is
   skipped since the caller is discarding it anyway. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* second page is only set when the TB spans two pages */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        /* low 2 bits tag the jump slot; tag 2 terminates the chain */
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
881 9fa3e853 bellard
882 9fa3e853 bellard
/* Set bits [start, start+len) in the bit array 'tab' (LSB-first
   within each byte). A zero length sets nothing. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    int first_byte = start >> 3;
    int last_byte = end >> 3;      /* byte holding the first bit NOT set */
    uint8_t head = (uint8_t)(0xff << (start & 7));
    uint8_t tail = (uint8_t)~(0xff << (end & 7));

    if (first_byte == last_byte) {
        /* the whole range lives inside a single byte */
        if (start < end) {
            tab[first_byte] |= head & tail;
        }
        return;
    }
    /* partial leading byte */
    tab[first_byte] |= head;
    /* fully covered bytes in the middle */
    if (last_byte - first_byte > 1) {
        memset(tab + first_byte + 1, 0xff, last_byte - first_byte - 1);
    }
    /* partial trailing byte, if the range does not end on a boundary */
    if (end & 7) {
        tab[last_byte] |= tail;
    }
}
908 9fa3e853 bellard
909 9fa3e853 bellard
/* Build the SMC bitmap of a page: one bit per guest byte covered by
   translated code, computed from every TB linked on the page. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    for (tb = p->first_tb; tb != NULL; tb = tb->page_next[n]) {
        /* low 2 bits tag which of the TB's pages this list entry is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* first page: code runs from pc's offset, clamped to the
               page end (tb_end may point past it otherwise) */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* second page: code starts at the page base */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
    }
}
936 9fa3e853 bellard
937 2e70f6ef pbrook
/* Translate one guest block starting at (pc, cs_base, flags) and link
   it into the physical page structures. Never returns NULL: if the TB
   pool or code buffer is exhausted, everything is flushed and the
   allocation retried. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    /* generated host code goes at the current buffer position */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills into a second page: record it too */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
974 3b46e624 ths
975 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* only look for the currently executing TB on a real write fault */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults to the page, it pays to track exactly
       which bytes are covered by code */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1085 fd6ce8f6 bellard
1086 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1087 41c1b1c9 Paul Brook
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1088 9fa3e853 bellard
{
1089 9fa3e853 bellard
    PageDesc *p;
1090 9fa3e853 bellard
    int offset, b;
1091 59817ccb bellard
#if 0
1092 a4193c8a bellard
    if (1) {
1093 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1094 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1095 93fcfe39 aliguori
                  cpu_single_env->eip,
1096 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1097 59817ccb bellard
    }
1098 59817ccb bellard
#endif
1099 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1100 5fafdf24 ths
    if (!p)
1101 9fa3e853 bellard
        return;
1102 9fa3e853 bellard
    if (p->code_bitmap) {
1103 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1104 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1105 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1106 9fa3e853 bellard
            goto do_invalidate;
1107 9fa3e853 bellard
    } else {
1108 9fa3e853 bellard
    do_invalidate:
1109 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1110 9fa3e853 bellard
    }
1111 9fa3e853 bellard
}
1112 9fa3e853 bellard
1113 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1114 41c1b1c9 Paul Brook
/* Invalidate every TB on one physical page (user-mode write-fault
   path). 'pc'/'puc' identify the faulting host context so that, with
   precise SMC support, execution can be restarted safely if the
   currently running TB was among the invalidated ones. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* locate the TB that contains the faulting host pc, if any */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits tag which of the TB's pages this list entry is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
1172 9fa3e853 bellard
#endif
1173 fd6ce8f6 bellard
1174 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
/* 'n' (0 or 1) selects which of the TB's (up to) two pages is being
   linked; it is also encoded into the low bits of the list pointer so
   the list walkers know which page_next[] slot continues the chain. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push the TB at the head of the page's list, tagging it with n */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may hold several target pages: accumulate their
           flags and strip PAGE_WRITE from each of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1227 fd6ce8f6 bellard
1228 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1229 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1230 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1231 fd6ce8f6 bellard
{
1232 fd6ce8f6 bellard
    TranslationBlock *tb;
1233 fd6ce8f6 bellard
1234 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1235 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1236 d4e8164f bellard
        return NULL;
1237 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1238 fd6ce8f6 bellard
    tb->pc = pc;
1239 b448f2f3 bellard
    tb->cflags = 0;
1240 d4e8164f bellard
    return tb;
1241 d4e8164f bellard
}
1242 d4e8164f bellard
1243 2e70f6ef pbrook
/* Release a TB.  In practice this is mostly used for single use
   temporary TBs; we ignore the hard cases and just back up the code
   generation pointer if this TB happens to be the last one created. */
void tb_free(TranslationBlock *tb)
{
    int is_last = (nb_tbs > 0) && (tb == &tbs[nb_tbs - 1]);

    if (is_last) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
1253 2e70f6ef pbrook
1254 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list (second slot only when the TB crosses a
       page boundary) */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* jmp_first's low bits tag the entry; 2 marks the list head. */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses; 0xffff means the slot is unused */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1293 fd6ce8f6 bellard
1294 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1295 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1296 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1297 fd6ce8f6 bellard
{
1298 9fa3e853 bellard
    int m_min, m_max, m;
1299 9fa3e853 bellard
    unsigned long v;
1300 9fa3e853 bellard
    TranslationBlock *tb;
1301 a513fe19 bellard
1302 a513fe19 bellard
    if (nb_tbs <= 0)
1303 a513fe19 bellard
        return NULL;
1304 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1305 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1306 a513fe19 bellard
        return NULL;
1307 a513fe19 bellard
    /* binary search (cf Knuth) */
1308 a513fe19 bellard
    m_min = 0;
1309 a513fe19 bellard
    m_max = nb_tbs - 1;
1310 a513fe19 bellard
    while (m_min <= m_max) {
1311 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1312 a513fe19 bellard
        tb = &tbs[m];
1313 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1314 a513fe19 bellard
        if (v == tc_ptr)
1315 a513fe19 bellard
            return tb;
1316 a513fe19 bellard
        else if (tc_ptr < v) {
1317 a513fe19 bellard
            m_max = m - 1;
1318 a513fe19 bellard
        } else {
1319 a513fe19 bellard
            m_min = m + 1;
1320 a513fe19 bellard
        }
1321 5fafdf24 ths
    }
1322 a513fe19 bellard
    return &tbs[m_max];
1323 a513fe19 bellard
}
1324 7501267e bellard
1325 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Unchain jump slot n of 'tb' from the list of TBs jumping into its
   target, then recursively unchain the target itself.  List pointers
   carry the slot index in their low 2 bits; the value 2 tags the
   list head. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (follow tagged links until tag == 2) */
        for (;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list of tb_next */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1365 ea041c0e bellard
1366 ea041c0e bellard
/* Reset both outgoing jump slots of 'tb', unchaining everything it
   jumps to. */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    int n;

    for (n = 0; n < 2; n++) {
        tb_reset_jump_recursive2(tb, n);
    }
}
1371 ea041c0e bellard
1372 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1373 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1374 94df27fd Paul Brook
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1375 94df27fd Paul Brook
{
1376 94df27fd Paul Brook
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1377 94df27fd Paul Brook
}
1378 94df27fd Paul Brook
#else
1379 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1380 d720b93d bellard
{
1381 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1382 9b3c35e0 j_mayer
    target_ulong pd;
1383 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1384 c2f07f81 pbrook
    PhysPageDesc *p;
1385 d720b93d bellard
1386 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1387 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1388 c2f07f81 pbrook
    if (!p) {
1389 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1390 c2f07f81 pbrook
    } else {
1391 c2f07f81 pbrook
        pd = p->phys_offset;
1392 c2f07f81 pbrook
    }
1393 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1394 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1395 d720b93d bellard
}
1396 c27004ec bellard
#endif
1397 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1398 d720b93d bellard
1399 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
/* Watchpoints are not supported in user-mode emulation: removal is a
   no-op and insertion reports -ENOSYS. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
1411 6658ffb8 pbrook
/* Add a watchpoint.  Returns 0 on success (optionally storing the new
   watchpoint through 'watchpoint') or -EINVAL for a bad addr/len. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* drop the TLB entry so the next access re-checks watchpoints */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1442 6658ffb8 pbrook
1443 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1444 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1445 a1d1bb31 aliguori
                          int flags)
1446 6658ffb8 pbrook
{
1447 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1448 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1449 6658ffb8 pbrook
1450 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1451 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1452 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1453 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1454 6658ffb8 pbrook
            return 0;
1455 6658ffb8 pbrook
        }
1456 6658ffb8 pbrook
    }
1457 a1d1bb31 aliguori
    return -ENOENT;
1458 6658ffb8 pbrook
}
1459 6658ffb8 pbrook
1460 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1461 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1462 a1d1bb31 aliguori
{
1463 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1464 7d03f82f edgar_igl
1465 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1466 a1d1bb31 aliguori
1467 a1d1bb31 aliguori
    qemu_free(watchpoint);
1468 a1d1bb31 aliguori
}
1469 a1d1bb31 aliguori
1470 a1d1bb31 aliguori
/* Remove all matching watchpoints.  Uses the _SAFE iterator because
   cpu_watchpoint_remove_by_ref frees the current entry. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
1481 7d03f82f edgar_igl
1482 a1d1bb31 aliguori
/* Add a breakpoint.  Returns 0 on success (optionally storing the new
   breakpoint through 'breakpoint'), or -ENOSYS when the target has no
   in-circuit-emulation support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* retranslate the code at pc so the breakpoint takes effect */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
1509 4c3a88a2 bellard
1510 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1511 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1512 a1d1bb31 aliguori
{
1513 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1514 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1515 a1d1bb31 aliguori
1516 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1517 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1518 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1519 a1d1bb31 aliguori
            return 0;
1520 a1d1bb31 aliguori
        }
1521 7d03f82f edgar_igl
    }
1522 a1d1bb31 aliguori
    return -ENOENT;
1523 a1d1bb31 aliguori
#else
1524 a1d1bb31 aliguori
    return -ENOSYS;
1525 7d03f82f edgar_igl
#endif
1526 7d03f82f edgar_igl
}
1527 7d03f82f edgar_igl
1528 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference: unlink it, invalidate
   the translated code at its pc, then free it. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1539 a1d1bb31 aliguori
1540 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1541 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1542 a1d1bb31 aliguori
{
1543 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1544 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1545 a1d1bb31 aliguori
1546 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1547 a1d1bb31 aliguori
        if (bp->flags & mask)
1548 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1549 c0ce998e aliguori
    }
1550 4c3a88a2 bellard
#endif
1551 4c3a88a2 bellard
}
1552 4c3a88a2 bellard
1553 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1570 c33a346e bellard
1571 34865134 bellard
/* enable or disable low levels log.  Opens the log file lazily on the
   first non-zero log_flags and closes it when logging is disabled. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        /* append on reopen once log_append has been set below */
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1598 34865134 bellard
1599 34865134 bellard
void cpu_set_log_filename(const char *filename)
1600 34865134 bellard
{
1601 34865134 bellard
    logfilename = strdup(filename);
1602 e735b91c pbrook
    if (logfile) {
1603 e735b91c pbrook
        fclose(logfile);
1604 e735b91c pbrook
        logfile = NULL;
1605 e735b91c pbrook
    }
1606 e735b91c pbrook
    cpu_set_log(loglevel);
1607 34865134 bellard
}
1608 c33a346e bellard
1609 3098dba0 aurel32
/* Unchain the TB the CPU is currently executing so it returns to the
   main loop as soon as possible. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1628 3098dba0 aurel32
1629 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    /* keep the previous request mask to detect newly raised bits */
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* force the instruction counter to expire immediately */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1660 ea041c0e bellard
1661 b54ad049 bellard
/* Clear the given bits from the CPU's pending interrupt request mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1665 b54ad049 bellard
1666 3098dba0 aurel32
/* Ask the CPU to leave its execution loop: set the exit flag first,
   then unchain the current TB so the flag is noticed promptly. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1671 3098dba0 aurel32
1672 c7cd6a37 blueswir1
/* Table of selectable log masks (name + help text), consumed by
   cpu_str_to_log_mask(); terminated by a zero-mask sentinel entry. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1703 f193c797 bellard
1704 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
/* Registered physical-memory clients, notified of mapping changes. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

/* Notify every registered client of a new physical memory mapping. */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}
1717 f6f3fbca Michael S. Tsirkin
1718 f6f3fbca Michael S. Tsirkin
/* Ask every registered client to sync its dirty bitmap for the range
   [start, end); stop and propagate the first negative return value. */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    int ret;

    QLIST_FOREACH(client, &memory_client_list, list) {
        ret = client->sync_dirty_bitmap(client, start, end);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}
1729 f6f3fbca Michael S. Tsirkin
1730 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1731 f6f3fbca Michael S. Tsirkin
{
1732 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1733 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1734 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1735 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1736 f6f3fbca Michael S. Tsirkin
            return r;
1737 f6f3fbca Michael S. Tsirkin
    }
1738 f6f3fbca Michael S. Tsirkin
    return 0;
1739 f6f3fbca Michael S. Tsirkin
}
1740 f6f3fbca Michael S. Tsirkin
1741 5cd2c5b6 Richard Henderson
/* Recursively walk one subtree of the multi-level physical page table,
   invoking client->set_memory for every mapped (non-unassigned) page.
   'level' 0 means *lp points at a leaf array of PhysPageDesc; higher
   levels point at arrays of child pointers. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        /* unpopulated subtree: nothing mapped here */
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1764 f6f3fbca Michael S. Tsirkin
1765 f6f3fbca Michael S. Tsirkin
/* Replay the whole physical page mapping to 'client' by walking every
   top-level (L1) entry of the physical page table. */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        /* Walk slot i.  (Was "l1_phys_map + 1", which ignored the loop
           index and replayed the same single slot P_L1_SIZE times.) */
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}
1773 f6f3fbca Michael S. Tsirkin
1774 f6f3fbca Michael S. Tsirkin
/* Register a physical-memory client and immediately replay the current
   page mappings to it via its set_memory callback. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1779 f6f3fbca Michael S. Tsirkin
1780 f6f3fbca Michael S. Tsirkin
/* Unregister a previously registered physical-memory client. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
1785 f6f3fbca Michael S. Tsirkin
1786 f193c797 bellard
/* Return 1 when s2 (length exactly n) matches the first n characters
   of s1, 0 otherwise. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) == n && memcmp(s1, s2, n) == 0) {
        return 1;
    }
    return 0;
}
1792 3b46e624 ths
1793 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error.
   "all" selects every entry of cpu_log_items; any unknown name makes
   the whole parse fail with 0. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* p..p1 is the current comma-delimited token */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
            /* falls through to "found" with item at the zero-mask
               sentinel, so the extra OR below is a no-op */
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
1825 ea041c0e bellard
1826 7501267e bellard
/* Report a fatal emulation error (printf-style) to stderr and to the
   qemu log, dump the CPU state, then abort(). Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* ap is consumed by the stderr vfprintf; ap2 is a copy for the
       qemu_log path below */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT handler so abort() really dies
           instead of being caught by guest signal emulation */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1865 7501267e bellard
1866 c5be9f08 ths
/* Create a copy of 'env' (used by fork emulation).  Returns a freshly
   initialized CPUState whose register state is copied from 'env' but whose
   chaining, index and debug lists are its own.  */
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    /* BUG FIX: the lists of the *new* env must be re-initialized here.
       The memcpy above made new_env's list heads alias env's entries; the
       previous code reset env's own lists instead, which both destroyed the
       parent's break/watchpoint lists and made the FOREACH loops below
       iterate over empty lists, so nothing was ever cloned.  */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1899 c5be9f08 ths
1900 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1901 0124311e bellard
1902 5c751e99 edgar_igl
/* Drop tb_jmp_cache entries that could refer to translated code on the
   flushed page 'addr' or on the page immediately before it (a TB may
   straddle the boundary).  */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    target_ulong pages[2];
    int k;

    pages[0] = addr - TARGET_PAGE_SIZE;
    pages[1] = addr;

    for (k = 0; k < 2; k++) {
        unsigned int idx = tb_jmp_cache_hash_page(pages[k]);
        memset(&env->tb_jmp_cache[idx], 0,
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    }
}
1916 5c751e99 edgar_igl
1917 08738984 Igor Kovalenko
/* Canonical invalid TLB entry: all address fields are -1 so no masked
   comparison can ever match.  Copied into slots to invalidate them.  */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1923 08738984 Igor Kovalenko
1924 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1925 ee8b7021 bellard
   implemented yet) */
1926 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1927 33417e70 bellard
{
1928 33417e70 bellard
    int i;
1929 0124311e bellard
1930 9fa3e853 bellard
#if defined(DEBUG_TLB)
1931 9fa3e853 bellard
    printf("tlb_flush:\n");
1932 9fa3e853 bellard
#endif
1933 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1934 0124311e bellard
       links while we are modifying them */
1935 0124311e bellard
    env->current_tb = NULL;
1936 0124311e bellard
1937 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1938 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1939 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1940 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1941 cfde4bd9 Isaku Yamahata
        }
1942 33417e70 bellard
    }
1943 9fa3e853 bellard
1944 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1945 9fa3e853 bellard
1946 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1947 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1948 e3db7226 bellard
    tlb_flush_count++;
1949 33417e70 bellard
}
1950 33417e70 bellard
1951 274da6b2 bellard
/* Invalidate one TLB entry if any of its read/write/code addresses
   matches the page 'addr'.  */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    target_ulong mask = TARGET_PAGE_MASK | TLB_INVALID_MASK;

    if (addr == (tlb_entry->addr_read & mask) ||
        addr == (tlb_entry->addr_write & mask) ||
        addr == (tlb_entry->addr_code & mask)) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1962 61382a50 bellard
1963 2e12669a bellard
/* Flush TLB entries for a single virtual page across all MMU modes.  */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int slot;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    slot = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][slot], addr);
    }

    tlb_flush_jmp_cache(env, addr);
}
1992 9fa3e853 bellard
1993 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1994 9fa3e853 bellard
   can be detected */
1995 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
1996 9fa3e853 bellard
{
1997 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1998 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1999 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
2000 9fa3e853 bellard
}
2001 9fa3e853 bellard
2002 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* Setting CODE_DIRTY_FLAG takes the page out of the SMC-detection
       slow path.  'env' and 'vaddr' are currently unused here.  */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2009 1ccde1cb bellard
2010 5fafdf24 ths
/* If this TLB entry maps writable RAM inside [start, start+length), mark
   it TLB_NOTDIRTY so the next write takes the dirty-tracking slow path.  */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long host_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    host_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
    if (host_addr - start < length) {
        tlb_entry->addr_write =
            (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
    }
}
2021 1ccde1cb bellard
2022 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
/* Clear the given dirty flags for [start, end) and re-arm the dirty trap
   (TLB_NOTDIRTY) in every CPU's TLB so subsequent writes are noticed.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    /* Walk every TLB slot of every CPU and mark entries covering the
       range as not-dirty (host-address comparison in the helper).  */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2057 1ccde1cb bellard
2058 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2059 74576198 aliguori
{
2060 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2061 74576198 aliguori
    in_migration = enable;
2062 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2063 f6f3fbca Michael S. Tsirkin
    return ret;
2064 74576198 aliguori
}
2065 74576198 aliguori
2066 74576198 aliguori
/* Report whether dirty-memory tracking is currently enabled
   (the value last set via cpu_physical_memory_set_dirty_tracking).  */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2070 74576198 aliguori
2071 c227f099 Anthony Liguori
/* Ask registered clients to synchronize the dirty bitmap for the
   physical range [start_addr, end_addr].  Returns the notifier result.  */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    return cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
}
2079 2bec46dc aliguori
2080 3a7d929e bellard
/* Re-arm the dirty trap on a single TLB entry: if it maps RAM whose page
   is no longer marked dirty, tag the write address with TLB_NOTDIRTY.  */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *host;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    host = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
                                   + tlb_entry->addend);
    ram_addr = qemu_ram_addr_from_host(host);
    if (!cpu_physical_memory_is_dirty(ram_addr)) {
        tlb_entry->addr_write |= TLB_NOTDIRTY;
    }
}
2094 3a7d929e bellard
2095 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2096 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2097 3a7d929e bellard
{
2098 3a7d929e bellard
    int i;
2099 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2100 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2101 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2102 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2103 cfde4bd9 Isaku Yamahata
    }
2104 3a7d929e bellard
}
2105 3a7d929e bellard
2106 0f459d16 pbrook
/* Clear TLB_NOTDIRTY on one entry, but only if it still maps 'vaddr'.  */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
2111 1ccde1cb bellard
2112 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2113 0f459d16 pbrook
   so that it is no longer dirty */
2114 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2115 1ccde1cb bellard
{
2116 1ccde1cb bellard
    int i;
2117 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2118 1ccde1cb bellard
2119 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2120 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2121 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2122 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2123 9fa3e853 bellard
}
2124 9fa3e853 bellard
2125 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    /* 'size' is assumed to be a power of two (page size), so this mask
       selects the naturally aligned region containing vaddr.  */
    target_ulong mask = ~(size - 1);

    /* No large-page region recorded yet ((target_ulong)-1 is the
       "empty" sentinel set by tlb_flush): start one.  */
    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    /* Widen the mask until both the recorded region and vaddr fall
       inside one aligned block.  */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2147 d4c430a8 Paul Brook
2148 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;           /* phys_offset: ram offset + region flags */
    unsigned int index;         /* TLB slot for vaddr */
    target_ulong address;       /* vaddr plus TLB flag bits */
    target_ulong code_address;
    unsigned long addend;       /* host_ptr - guest_vaddr for RAM access */
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;   /* value for the per-slot iotlb entry */

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Record the large page so tlb_flush_page can force a full flush. */
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    /* Fill in the TLB slot; the iotlb entry stores (io address - vaddr)
       so the fast path can add the faulting vaddr back in.  */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap the first write for dirty tracking.  */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2252 9fa3e853 bellard
2253 0124311e bellard
#else
2254 0124311e bellard
2255 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
{
    /* No-op: this is the CONFIG_USER_ONLY build (see the #if above),
       which has no softmmu TLB to flush.  */
}
2258 0124311e bellard
2259 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    /* No-op in the CONFIG_USER_ONLY build: no softmmu TLB exists.  */
}
2262 0124311e bellard
2263 edf8e2af Mika Westerberg
/*
2264 edf8e2af Mika Westerberg
 * Walks guest process memory "regions" one by one
2265 edf8e2af Mika Westerberg
 * and calls callback function 'fn' for each region.
2266 edf8e2af Mika Westerberg
 */
2267 5cd2c5b6 Richard Henderson
2268 5cd2c5b6 Richard Henderson
/* Accumulator used while walking the page table: adjacent pages with
   identical protection are merged into one region before 'fn' is called.  */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked once per merged region */
    void *priv;                 /* opaque pointer passed through to fn */
    unsigned long start;        /* start of the open region, -1ul if none */
    int prot;                   /* protection flags of the open region */
};
2275 5cd2c5b6 Richard Henderson
2276 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2277 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2278 5cd2c5b6 Richard Henderson
{
2279 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2280 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2281 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2282 5cd2c5b6 Richard Henderson
            return rc;
2283 5cd2c5b6 Richard Henderson
        }
2284 5cd2c5b6 Richard Henderson
    }
2285 5cd2c5b6 Richard Henderson
2286 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2287 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2288 5cd2c5b6 Richard Henderson
2289 5cd2c5b6 Richard Henderson
    return 0;
2290 5cd2c5b6 Richard Henderson
}
2291 5cd2c5b6 Richard Henderson
2292 5cd2c5b6 Richard Henderson
/* Recursive helper for walk_memory_regions: descend one level of the
   radix page table rooted at *lp, covering guest addresses starting at
   'base'.  Returns the first non-zero callback result, else 0.  */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    /* Empty subtree: treat the whole span as unmapped (prot 0), which
       closes any open region at 'base'.  */
    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        /* Leaf level: an array of PageDesc, one per page.  */
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            /* Protection changed: flush the open region and start a new
               one; identical-prot pages are merged implicitly.  */
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        /* Interior level: an array of child pointers.  */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2329 5cd2c5b6 Richard Henderson
2330 5cd2c5b6 Richard Henderson
/* Walk all mapped guest memory, invoking 'fn(priv, start, end, prot)' once
   per maximal run of pages sharing the same protection.  Returns the first
   non-zero value returned by 'fn', or 0 on completion.  */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data = {
        .fn = fn,
        .priv = priv,
        .start = -1ul,  /* no region open yet */
        .prot = 0,
    };
    unsigned long idx;

    for (idx = 0; idx < V_L1_SIZE; idx++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)idx << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + idx);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any.  */
    return walk_memory_regions_end(&data, 0, 0);
}
2350 edf8e2af Mika Westerberg
2351 b480d9b7 Paul Brook
/* walk_memory_regions callback: print one "start-end size rwx" line to
   the FILE passed through 'priv'.  Always returns 0 to continue the walk.  */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *out = (FILE *)priv;

    fprintf(out, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
            " "TARGET_ABI_FMT_lx" %c%c%c\n",
            start, end, end - start,
            (prot & PAGE_READ) ? 'r' : '-',
            (prot & PAGE_WRITE) ? 'w' : '-',
            (prot & PAGE_EXEC) ? 'x' : '-');

    return 0;
}
2365 edf8e2af Mika Westerberg
2366 edf8e2af Mika Westerberg
/* dump memory mappings */
2367 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2368 edf8e2af Mika Westerberg
{
2369 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2370 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2371 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2372 33417e70 bellard
}
2373 33417e70 bellard
2374 53a5960a pbrook
/* Return the PAGE_* flags for the page containing 'address',
   or 0 if the page has no descriptor.  */
int page_get_flags(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);

    return p ? p->flags : 0;
}
2383 9fa3e853 bellard
2384 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    /* Remember that the range was originally writable, even if write
       access is later removed to protect translated code.  */
    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2421 33417e70 bellard
2422 3d97b40b ths
/* Verify that every page in [start, start+len) is mapped, valid, and has
   the access rights requested in 'flags' (PAGE_READ/PAGE_WRITE); pages that
   were write-protected to guard translated code are unprotected on demand.
   Returns 0 on success, -1 on any failure or address-space wrap-around.  */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* BUG FIX: the previous code did "return 0;" here, so only the
               FIRST page of a multi-page range was ever checked and
               unprotected; keep scanning instead.  */
        }
    }
    return 0;
}
2471 3d97b40b ths
2472 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        /* Faulting address is not a guest page we track: not handled.  */
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* Host pages may be larger than target pages, so restore write
           access to the whole host page covering 'address'.  */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            /* Accumulate flags across all target pages in this host page
               so mprotect below grants the union of their permissions.  */
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2519 9fa3e853 bellard
2520 6a00d601 bellard
/* No-op stub for user-mode emulation: there is no softmmu TLB whose
   entries could be marked dirty.  */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2524 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2525 9fa3e853 bellard
2526 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2527 8da3ff18 pbrook
2528 c04b2b78 Paul Brook
/* Byte offset of 'addr' within its target page.  */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

/* Describes a physical page that is split between several memory
   regions: each byte offset inside the page maps to its own I/O
   handler index and region offset.  */
typedef struct subpage_t {
    target_phys_addr_t base;                    /* page-aligned base address */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];  /* per-byte I/O handler index */
    ram_addr_t region_offset[TARGET_PAGE_SIZE]; /* per-byte region offset */
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
/* Compute the sub-page byte range [start_addr2, end_addr2] that the
   registration [start_addr, start_addr+orig_size) covers within the
   page containing 'addr', and set 'need_subpage' when it does not
   cover the whole page.  NOTE: reads 'orig_size' from the enclosing
   scope, so callers must have that variable in scope.  */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2559 db7b5426 blueswir1
2560 8f2498f9 Michael S. Tsirkin
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    /* orig_size is read implicitly by the CHECK_SUBPAGE macro below,
       even though it looks unused here.  */
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already has a mapping: either overwrite it whole, or
               split it into a subpage when the new registration covers
               only part of the page.  */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    /* Convert the existing full-page mapping into a
                       subpage before registering the partial range.  */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage: reuse its handler.  */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* For RAM/ROM pages the phys_offset advances with the
                   address so each page maps consecutive backing RAM.  */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* No previous mapping: allocate the page descriptor.  */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    /* Partial-page I/O registration on a fresh page:
                       back the rest of the page with unassigned mem.  */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2648 33417e70 bellard
2649 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2650 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2651 ba863458 bellard
{
2652 ba863458 bellard
    PhysPageDesc *p;
2653 ba863458 bellard
2654 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2655 ba863458 bellard
    if (!p)
2656 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2657 ba863458 bellard
    return p->phys_offset;
2658 ba863458 bellard
}
2659 ba863458 bellard
2660 c227f099 Anthony Liguori
/* Tell the accelerator (KVM, when in use) that MMIO writes to
   [addr, addr+size) may be coalesced before delivery.  */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_coalesce_mmio_region(addr, size);
}
2665 f65ed4c1 aliguori
2666 c227f099 Anthony Liguori
/* Undo qemu_register_coalesced_mmio() for [addr, addr+size) when KVM
   is in use; no-op otherwise.  */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_uncoalesce_mmio_region(addr, size);
}
2671 f65ed4c1 aliguori
2672 62a2744c Sheng Yang
/* Flush any MMIO writes the accelerator has buffered for coalescing;
   no-op unless KVM is in use.  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2677 62a2744c Sheng Yang
2678 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2679 c902760f Marcelo Tosatti
2680 c902760f Marcelo Tosatti
#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

/* Return the block size of the filesystem backing 'path' -- the huge
   page size when 'path' lives on hugetlbfs -- or 0 on statfs failure.
   Warns on stderr when the path is not actually on hugetlbfs.  */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int err;

    /* statfs can be interrupted by a signal; retry on EINTR.  */
    do {
        err = statfs(path, &fs);
    } while (err != 0 && errno == EINTR);

    if (err != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
2703 c902760f Marcelo Tosatti
2704 04b16653 Alex Williamson
/* Back a RAM block of 'memory' bytes with a hugetlbfs file created
   under 'path'.  On success the mapping is returned and block->fd
   records the backing file descriptor; on any failure NULL is
   returned and the caller is expected to fall back to normal
   allocation.  */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* A region smaller than one huge page cannot use this path.  */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the fd keeps the file alive, and it is
       cleaned up automatically when the fd is closed.  */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages.  */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
2772 c902760f Marcelo Tosatti
#endif
2773 c902760f Marcelo Tosatti
2774 d17b5288 Alex Williamson
/* Best-fit search for a free ram_addr_t range of 'size' bytes: among
   all gaps between existing RAM blocks, choose the smallest one that
   still fits.
   NOTE(review): if no gap is large enough, the loop leaves offset at 0,
   which would overlap the first block -- presumably callers never
   exhaust the ram address space; confirm before relying on this.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        /* Find the start of the closest block after this one, i.e. the
           end of the gap that begins at 'end'.  */
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset =  end;
            mingap = next - end;
        }
    }
    return offset;
}
2799 04b16653 Alex Williamson
2800 04b16653 Alex Williamson
static ram_addr_t last_ram_offset(void)
2801 04b16653 Alex Williamson
{
2802 d17b5288 Alex Williamson
    RAMBlock *block;
2803 d17b5288 Alex Williamson
    ram_addr_t last = 0;
2804 d17b5288 Alex Williamson
2805 d17b5288 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next)
2806 d17b5288 Alex Williamson
        last = MAX(last, block->offset + block->length);
2807 d17b5288 Alex Williamson
2808 d17b5288 Alex Williamson
    return last;
2809 d17b5288 Alex Williamson
}
2810 d17b5288 Alex Williamson
2811 84b89d78 Cam Macdonell
/* Register a new RAM block of 'size' bytes and return its ram_addr_t
   offset.  If 'host' is non-NULL it is used as the backing memory
   (caller-owned); otherwise memory is allocated here, preferring
   hugetlbfs when -mem-path was given.  The block id is built from the
   device path and 'name' and must be unique; a duplicate aborts.  */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Prefix the id with the owning device's bus path when available,
       e.g. "<dev-path>/<name>".  */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Ids must be unique across all registered blocks.  */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            /* Try hugetlbfs backing first; fall back to a normal
               allocation if that fails.  */
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
#ifdef MADV_MERGEABLE
                madvise(new_block->host, size, MADV_MERGEABLE);
#endif
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
            madvise(new_block->host, size, MADV_MERGEABLE);
#endif
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the new block fully dirty
       (0xff = all dirty flags set for every page).  */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2882 e9a1ab19 bellard
2883 6977dfe6 Yoshiaki Tamura
/* Allocate a new RAM block of 'size' bytes with QEMU-owned backing
   memory; convenience wrapper for qemu_ram_alloc_from_ptr() with
   host == NULL.  */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
2887 6977dfe6 Yoshiaki Tamura
2888 c227f099 Anthony Liguori
/* Unregister and free the RAM block whose offset is exactly 'addr'.
   NOTE(review): silently does nothing when no block matches, and the
   dirty bitmap is not shrunk.  */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                /* block->fd is set by file_ram_alloc(); when it is 0 the
                   hugetlbfs path fell back to qemu_vmalloc().  */
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                /* Allocated via mmap in qemu_ram_alloc_from_ptr().  */
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }

}
2917 e9a1ab19 bellard
2918 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* Unsigned wraparound makes this a single-compare range check:
           when addr < block->offset the difference wraps to a huge
           value and fails the test.  */
        if (addr - block->offset < block->length) {
            /* Move the hit to the list head (MRU order) so repeated
               lookups into the same block stay fast.  */
            QLIST_REMOVE(block, next);
            QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            return block->host + (addr - block->offset);
        }
    }

    /* Addresses outside every registered block are a fatal caller bug.  */
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
2943 dc828ca1 pbrook
2944 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
2945 5579c7f3 pbrook
   (typically a TLB entry) back to a ram offset.  */
2946 c227f099 Anthony Liguori
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2947 5579c7f3 pbrook
{
2948 94a6b54f pbrook
    RAMBlock *block;
2949 94a6b54f pbrook
    uint8_t *host = ptr;
2950 94a6b54f pbrook
2951 f471a17e Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2952 f471a17e Alex Williamson
        if (host - block->host < block->length) {
2953 f471a17e Alex Williamson
            return block->offset + (host - block->host);
2954 f471a17e Alex Williamson
        }
2955 94a6b54f pbrook
    }
2956 f471a17e Alex Williamson
2957 f471a17e Alex Williamson
    fprintf(stderr, "Bad ram pointer %p\n", ptr);
2958 f471a17e Alex Williamson
    abort();
2959 f471a17e Alex Williamson
2960 f471a17e Alex Williamson
    return 0;
2961 5579c7f3 pbrook
}
2962 5579c7f3 pbrook
2963 c227f099 Anthony Liguori
/* Byte read from unassigned memory: optionally logged; on targets that
   fault on unassigned accesses the per-target handler is invoked.
   Otherwise reads as zero.  */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
2973 e18231a3 blueswir1
2974 c227f099 Anthony Liguori
/* 16-bit read from unassigned memory; see unassigned_mem_readb.
   Reads as zero unless the target raises a fault.  */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2984 e18231a3 blueswir1
2985 c227f099 Anthony Liguori
/* 32-bit read from unassigned physical memory; see unassigned_mem_readb. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
2995 33417e70 bellard
2996 c227f099 Anthony Liguori
/* Byte write to unassigned physical memory: optionally logged, traps on
   targets that fault such accesses, otherwise silently discarded. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
3005 e18231a3 blueswir1
3006 c227f099 Anthony Liguori
/* 16-bit write to unassigned physical memory; see unassigned_mem_writeb. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
3015 e18231a3 blueswir1
3016 c227f099 Anthony Liguori
/* 32-bit write to unassigned physical memory; see unassigned_mem_writeb. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
3025 33417e70 bellard
3026 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3027 33417e70 bellard
    unassigned_mem_readb,
3028 e18231a3 blueswir1
    unassigned_mem_readw,
3029 e18231a3 blueswir1
    unassigned_mem_readl,
3030 33417e70 bellard
};
3031 33417e70 bellard
3032 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3033 33417e70 bellard
    unassigned_mem_writeb,
3034 e18231a3 blueswir1
    unassigned_mem_writew,
3035 e18231a3 blueswir1
    unassigned_mem_writel,
3036 33417e70 bellard
};
3037 33417e70 bellard
3038 c227f099 Anthony Liguori
/* Byte store into RAM holding translated code: invalidate any TBs for
   the page, perform the store, and mark the page dirty.  Once all code
   has been flushed the notdirty callback is removed from the TLB. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);

    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}
3057 9fa3e853 bellard
3058 c227f099 Anthony Liguori
/* 16-bit variant of notdirty_mem_writeb.  */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);

    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}
3077 9fa3e853 bellard
3078 c227f099 Anthony Liguori
/* 32-bit variant of notdirty_mem_writeb.  */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);

    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}
3097 9fa3e853 bellard
3098 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const error_mem_read[3] = {
3099 9fa3e853 bellard
    NULL, /* never used */
3100 9fa3e853 bellard
    NULL, /* never used */
3101 9fa3e853 bellard
    NULL, /* never used */
3102 9fa3e853 bellard
};
3103 9fa3e853 bellard
3104 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3105 1ccde1cb bellard
    notdirty_mem_writeb,
3106 1ccde1cb bellard
    notdirty_mem_writew,
3107 1ccde1cb bellard
    notdirty_mem_writel,
3108 1ccde1cb bellard
};
3109 1ccde1cb bellard
3110 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
3111 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
3112 0f459d16 pbrook
{
3113 0f459d16 pbrook
    CPUState *env = cpu_single_env;
3114 06d55cc1 aliguori
    target_ulong pc, cs_base;
3115 06d55cc1 aliguori
    TranslationBlock *tb;
3116 0f459d16 pbrook
    target_ulong vaddr;
3117 a1d1bb31 aliguori
    CPUWatchpoint *wp;
3118 06d55cc1 aliguori
    int cpu_flags;
3119 0f459d16 pbrook
3120 06d55cc1 aliguori
    if (env->watchpoint_hit) {
3121 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
3122 06d55cc1 aliguori
         * the debug interrupt so that is will trigger after the
3123 06d55cc1 aliguori
         * current instruction. */
3124 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3125 06d55cc1 aliguori
        return;
3126 06d55cc1 aliguori
    }
3127 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3128 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3129 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
3130 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3131 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
3132 6e140f28 aliguori
            if (!env->watchpoint_hit) {
3133 6e140f28 aliguori
                env->watchpoint_hit = wp;
3134 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
3135 6e140f28 aliguori
                if (!tb) {
3136 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
3137 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
3138 6e140f28 aliguori
                }
3139 6e140f28 aliguori
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3140 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
3141 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3142 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
3143 6e140f28 aliguori
                } else {
3144 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3145 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3146 6e140f28 aliguori
                }
3147 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
3148 06d55cc1 aliguori
            }
3149 6e140f28 aliguori
        } else {
3150 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
3151 0f459d16 pbrook
        }
3152 0f459d16 pbrook
    }
3153 0f459d16 pbrook
}
3154 0f459d16 pbrook
3155 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
3156 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
3157 6658ffb8 pbrook
   phys routines.  */
3158 c227f099 Anthony Liguori
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3159 6658ffb8 pbrook
{
3160 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3161 6658ffb8 pbrook
    return ldub_phys(addr);
3162 6658ffb8 pbrook
}
3163 6658ffb8 pbrook
3164 c227f099 Anthony Liguori
/* 16-bit watchpoint-checked read; see watch_mem_readb.  */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3169 6658ffb8 pbrook
3170 c227f099 Anthony Liguori
/* 32-bit watchpoint-checked read; see watch_mem_readb.  */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3175 6658ffb8 pbrook
3176 c227f099 Anthony Liguori
/* Byte watchpoint-checked write; see watch_mem_readb.  */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
3182 6658ffb8 pbrook
3183 c227f099 Anthony Liguori
/* 16-bit watchpoint-checked write; see watch_mem_readb.  */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
3189 6658ffb8 pbrook
3190 c227f099 Anthony Liguori
/* 32-bit watchpoint-checked write; see watch_mem_readb.  */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
3196 6658ffb8 pbrook
3197 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const watch_mem_read[3] = {
3198 6658ffb8 pbrook
    watch_mem_readb,
3199 6658ffb8 pbrook
    watch_mem_readw,
3200 6658ffb8 pbrook
    watch_mem_readl,
3201 6658ffb8 pbrook
};
3202 6658ffb8 pbrook
3203 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3204 6658ffb8 pbrook
    watch_mem_writeb,
3205 6658ffb8 pbrook
    watch_mem_writew,
3206 6658ffb8 pbrook
    watch_mem_writel,
3207 6658ffb8 pbrook
};
3208 6658ffb8 pbrook
3209 f6405247 Richard Henderson
static inline uint32_t subpage_readlen (subpage_t *mmio,
3210 f6405247 Richard Henderson
                                        target_phys_addr_t addr,
3211 f6405247 Richard Henderson
                                        unsigned int len)
3212 db7b5426 blueswir1
{
3213 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3214 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3215 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3216 db7b5426 blueswir1
           mmio, len, addr, idx);
3217 db7b5426 blueswir1
#endif
3218 db7b5426 blueswir1
3219 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3220 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3221 f6405247 Richard Henderson
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3222 db7b5426 blueswir1
}
3223 db7b5426 blueswir1
3224 c227f099 Anthony Liguori
/* Write counterpart of subpage_readlen; same slot remapping.  */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int slot = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, slot, value);
#endif

    addr += mmio->region_offset[slot];
    slot = mmio->sub_io_index[slot];
    io_mem_write[slot][len](io_mem_opaque[slot], addr, value);
}
3237 db7b5426 blueswir1
3238 c227f099 Anthony Liguori
/* Byte read thunk for the subpage dispatch table.  */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}
3242 db7b5426 blueswir1
3243 c227f099 Anthony Liguori
/* Byte write thunk for the subpage dispatch table.  */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}
3248 db7b5426 blueswir1
3249 c227f099 Anthony Liguori
/* 16-bit read thunk for the subpage dispatch table.  */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}
3253 db7b5426 blueswir1
3254 c227f099 Anthony Liguori
/* 16-bit write thunk for the subpage dispatch table.  */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}
3259 db7b5426 blueswir1
3260 c227f099 Anthony Liguori
/* 32-bit read thunk for the subpage dispatch table.  */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}
3264 db7b5426 blueswir1
3265 f6405247 Richard Henderson
/* 32-bit write thunk for the subpage dispatch table.  */
static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3270 db7b5426 blueswir1
3271 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const subpage_read[] = {
3272 db7b5426 blueswir1
    &subpage_readb,
3273 db7b5426 blueswir1
    &subpage_readw,
3274 db7b5426 blueswir1
    &subpage_readl,
3275 db7b5426 blueswir1
};
3276 db7b5426 blueswir1
3277 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const subpage_write[] = {
3278 db7b5426 blueswir1
    &subpage_writeb,
3279 db7b5426 blueswir1
    &subpage_writew,
3280 db7b5426 blueswir1
    &subpage_writel,
3281 db7b5426 blueswir1
};
3282 db7b5426 blueswir1
3283 c227f099 Anthony Liguori
/* Map [start, end] (offsets within one target page) of a subpage onto the
   io-mem entry `memory`, recording `region_offset` for address remapping.
   Returns 0 on success, -1 if the range falls outside the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Strip the IO_MEM encoding down to the bare table index.  */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3304 db7b5426 blueswir1
3305 f6405247 Richard Henderson
/* Allocate a subpage container for the page at `base`, register its io-mem
   handlers, store the resulting encoded entry in *phys, and point every
   slot at the original backing memory. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3325 db7b5426 blueswir1
3326 88715657 aliguori
static int get_free_io_mem_idx(void)
3327 88715657 aliguori
{
3328 88715657 aliguori
    int i;
3329 88715657 aliguori
3330 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3331 88715657 aliguori
        if (!io_mem_used[i]) {
3332 88715657 aliguori
            io_mem_used[i] = 1;
3333 88715657 aliguori
            return i;
3334 88715657 aliguori
        }
3335 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3336 88715657 aliguori
    return -1;
3337 88715657 aliguori
}
3338 88715657 aliguori
3339 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3340 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3341 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3342 3ee89922 blueswir1
   If io_index is non zero, the corresponding io zone is
3343 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3344 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3345 4254fab8 blueswir1
   returned if error. */
3346 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
3347 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
3348 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
3349 1eed09cb Avi Kivity
                                        void *opaque)
3350 33417e70 bellard
{
3351 3cab721d Richard Henderson
    int i;
3352 3cab721d Richard Henderson
3353 33417e70 bellard
    if (io_index <= 0) {
3354 88715657 aliguori
        io_index = get_free_io_mem_idx();
3355 88715657 aliguori
        if (io_index == -1)
3356 88715657 aliguori
            return io_index;
3357 33417e70 bellard
    } else {
3358 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
3359 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3360 33417e70 bellard
            return -1;
3361 33417e70 bellard
    }
3362 b5ff1b31 bellard
3363 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3364 3cab721d Richard Henderson
        io_mem_read[io_index][i]
3365 3cab721d Richard Henderson
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3366 3cab721d Richard Henderson
    }
3367 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3368 3cab721d Richard Henderson
        io_mem_write[io_index][i]
3369 3cab721d Richard Henderson
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3370 3cab721d Richard Henderson
    }
3371 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3372 f6405247 Richard Henderson
3373 f6405247 Richard Henderson
    return (io_index << IO_MEM_SHIFT);
3374 33417e70 bellard
}
3375 61382a50 bellard
3376 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3377 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3378 1eed09cb Avi Kivity
                           void *opaque)
3379 1eed09cb Avi Kivity
{
3380 1eed09cb Avi Kivity
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3381 1eed09cb Avi Kivity
}
3382 1eed09cb Avi Kivity
3383 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3384 88715657 aliguori
{
3385 88715657 aliguori
    int i;
3386 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3387 88715657 aliguori
3388 88715657 aliguori
    for (i=0;i < 3; i++) {
3389 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3390 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3391 88715657 aliguori
    }
3392 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3393 88715657 aliguori
    io_mem_used[io_index] = 0;
3394 88715657 aliguori
}
3395 88715657 aliguori
3396 e9179ce1 Avi Kivity
static void io_mem_init(void)
3397 e9179ce1 Avi Kivity
{
3398 e9179ce1 Avi Kivity
    int i;
3399 e9179ce1 Avi Kivity
3400 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3401 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3402 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3403 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3404 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3405 e9179ce1 Avi Kivity
3406 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3407 e9179ce1 Avi Kivity
                                          watch_mem_write, NULL);
3408 e9179ce1 Avi Kivity
}
3409 e9179ce1 Avi Kivity
3410 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3411 e2eef170 pbrook
3412 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3413 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3414 a68fe89c Paul Brook
/* Debug-access path for user-mode emulation: copy `len` bytes between
   `buf` and guest virtual memory, page by page, honoring the page
   protection flags.  Returns 0 on success, -1 on any invalid or
   insufficiently-permissioned page. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3452 8df1cd07 bellard
3453 13eb76e0 bellard
#else
3454 c227f099 Anthony Liguori
/* Copy `len` bytes between `buf` and guest physical memory, one page at
   a time.  RAM pages go through memcpy (with TB invalidation and dirty
   tracking on writes); MMIO pages are dispatched to the registered
   io-mem handlers using the widest naturally-aligned access available. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Clamp each chunk to the end of the current target page.  */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p) {
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                }
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p) {
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                }
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3550 8df1cd07 bellard
3551 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
/* Copy 'len' bytes from 'buf' to guest physical address 'addr',
   page by page.  Unlike cpu_physical_memory_rw(), this also writes into
   ROM and ROMD pages (via their host backing obtained from
   qemu_get_ram_ptr()); pages that are neither RAM, ROM nor ROMD are
   silently skipped.  No dirty-bit update or TB invalidation is done
   here. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* clamp this chunk to the end of the current target page */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3589 d0ecd2aa bellard
3590 6d16c2f8 aliguori
/* Bounce buffer used by cpu_physical_memory_map() when the requested
   region is not directly addressable RAM (e.g. MMIO): data is staged in
   'buffer' and transferred with cpu_physical_memory_rw()/write().
   There is exactly one static instance, so at most one bounce-backed
   mapping can be live at a time (buffer != NULL marks it in use). */
typedef struct {
    void *buffer;              /* host staging memory, or NULL when free */
    target_phys_addr_t addr;   /* guest physical address being mapped */
    target_phys_addr_t len;    /* length of the bounce-backed region */
} BounceBuffer;

static BounceBuffer bounce;
3597 6d16c2f8 aliguori
3598 ba223c29 aliguori
/* List of clients waiting to be notified when the bounce buffer is
   released, so that a previously failed cpu_physical_memory_map() can
   be retried (see cpu_register_map_client()). */
typedef struct MapClient {
    void *opaque;                    /* user data passed back to callback */
    void (*callback)(void *opaque);  /* invoked by cpu_notify_map_clients() */
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3606 ba223c29 aliguori
3607 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3608 ba223c29 aliguori
{
3609 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3610 ba223c29 aliguori
3611 ba223c29 aliguori
    client->opaque = opaque;
3612 ba223c29 aliguori
    client->callback = callback;
3613 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3614 ba223c29 aliguori
    return client;
3615 ba223c29 aliguori
}
3616 ba223c29 aliguori
3617 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3618 ba223c29 aliguori
{
3619 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3620 ba223c29 aliguori
3621 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3622 34d5e948 Isaku Yamahata
    qemu_free(client);
3623 ba223c29 aliguori
}
3624 ba223c29 aliguori
3625 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3626 ba223c29 aliguori
{
3627 ba223c29 aliguori
    MapClient *client;
3628 ba223c29 aliguori
3629 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3630 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3631 ba223c29 aliguori
        client->callback(client->opaque);
3632 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3633 ba223c29 aliguori
    }
3634 ba223c29 aliguori
}
3635 ba223c29 aliguori
3636 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;    /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;            /* start of the host mapping */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* process at most up to the end of the current target page */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Non-RAM page: fall back to the single static bounce
               buffer.  Give up if we already mapped something directly
               (result must be contiguous) or if the bounce buffer is
               already in use by another mapping. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* read mapping: pre-fill the staging buffer now */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* host backing is no longer contiguous: stop here and
               return the shorter mapping via *plen */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3697 6d16c2f8 aliguori
3698 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping.  On write, walk the touched range page
           by page, invalidating any translated code and setting the
           dirty bits (all except CODE_DIRTY_FLAG, which
           tb_invalidate_phys_page_range handles). */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush staged data back to the guest on
       write, release the buffer, and wake any waiting map clients. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
3733 d0ecd2aa bellard
3734 8df1cd07 bellard
/* warning: addr must be aligned */
/* Load a 32-bit value from guest physical address 'addr': dispatches
   to the MMIO read handler for I/O pages (applying the page's
   region_offset), otherwise reads directly from host RAM. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    /* everything above IO_MEM_ROM is MMIO, except ROMD pages which
       have direct RAM backing for reads */
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
3765 8df1cd07 bellard
3766 84b7b8e7 bellard
/* warning: addr must be aligned */
/* Load a 64-bit value from guest physical address 'addr'.  For MMIO
   pages the access is split into two 32-bit reads, combined in
   target-endian order; RAM pages are read directly. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: two 32-bit MMIO reads, high word first on
           big-endian targets */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
3803 84b7b8e7 bellard
3804 aab33094 bellard
/* XXX: optimize */
3805 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
3806 aab33094 bellard
{
3807 aab33094 bellard
    uint8_t val;
3808 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3809 aab33094 bellard
    return val;
3810 aab33094 bellard
}
3811 aab33094 bellard
3812 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
3813 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
3814 aab33094 bellard
{
3815 733f0b02 Michael S. Tsirkin
    int io_index;
3816 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
3817 733f0b02 Michael S. Tsirkin
    uint64_t val;
3818 733f0b02 Michael S. Tsirkin
    unsigned long pd;
3819 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
3820 733f0b02 Michael S. Tsirkin
3821 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3822 733f0b02 Michael S. Tsirkin
    if (!p) {
3823 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
3824 733f0b02 Michael S. Tsirkin
    } else {
3825 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
3826 733f0b02 Michael S. Tsirkin
    }
3827 733f0b02 Michael S. Tsirkin
3828 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3829 733f0b02 Michael S. Tsirkin
        !(pd & IO_MEM_ROMD)) {
3830 733f0b02 Michael S. Tsirkin
        /* I/O case */
3831 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3832 733f0b02 Michael S. Tsirkin
        if (p)
3833 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3834 733f0b02 Michael S. Tsirkin
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3835 733f0b02 Michael S. Tsirkin
    } else {
3836 733f0b02 Michael S. Tsirkin
        /* RAM case */
3837 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3838 733f0b02 Michael S. Tsirkin
            (addr & ~TARGET_PAGE_MASK);
3839 733f0b02 Michael S. Tsirkin
        val = lduw_p(ptr);
3840 733f0b02 Michael S. Tsirkin
    }
3841 733f0b02 Michael S. Tsirkin
    return val;
3842 aab33094 bellard
}
3843 aab33094 bellard
3844 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* non-RAM page: forward to the 32-bit MMIO write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* Exception to the "notdirty" contract: while migration is in
           progress, dirty tracking must still see this write, so the
           dirty bits are updated anyway. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
3882 8df1cd07 bellard
3883 c227f099 Anthony Liguori
/* Store a 64-bit value to guest physical address 'addr' without
   marking the page dirty or invalidating translated code (companion to
   stl_phys_notdirty; note it lacks even the in_migration special
   case).  MMIO writes are split into two 32-bit accesses in
   target-endian order.  addr must be aligned. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3914 bc98a7ef j_mayer
3915 8df1cd07 bellard
/* warning: addr must be aligned */
/* Store a 32-bit value to guest physical address 'addr'.  MMIO pages
   go through the 32-bit write handler; RAM writes additionally
   invalidate any translated code on the page and set its dirty bits. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
3950 8df1cd07 bellard
3951 aab33094 bellard
/* XXX: optimize */
3952 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
3953 aab33094 bellard
{
3954 aab33094 bellard
    uint8_t v = val;
3955 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3956 aab33094 bellard
}
3957 aab33094 bellard
3958 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
/* Store a 16-bit value to guest physical address 'addr'.  MMIO pages
   use the 16-bit write handler (index 1); RAM writes also invalidate
   translated code on the affected bytes and set the dirty bits. */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
3993 aab33094 bellard
3994 aab33094 bellard
/* XXX: optimize */
3995 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
3996 aab33094 bellard
{
3997 aab33094 bellard
    val = tswap64(val);
3998 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3999 aab33094 bellard
}
4000 aab33094 bellard
4001 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
/* Read or write 'len' bytes at guest *virtual* address 'addr', page by
   page, translating each page with cpu_get_phys_page_debug().  Writes
   use cpu_physical_memory_write_rom() so ROM can be patched (e.g. for
   breakpoints).  Returns 0 on success, -1 if any page is unmapped. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
4029 a68fe89c Paul Brook
#endif
4030 13eb76e0 bellard
4031 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called when, under icount, an I/O access happened mid-TB: locate the
   current TB from the host return address, restore guest state,
   invalidate the TB, and regenerate it so it ends exactly on the I/O
   instruction (CF_LAST_IO), then resume execution. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  retaddr);
    }
    /* total instruction budget at TB entry */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
4089 2e70f6ef pbrook
4090 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
4091 b3755a91 Paul Brook
4092 e3db7226 bellard
/* Print translation-buffer statistics (sizes, cross-page TBs, direct
   jump chaining, flush/invalidate counters) through 'cpu_fprintf',
   followed by the TCG backend's own statistics. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* accumulate per-TB statistics over all live TBs */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n", 
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
4144 e3db7226 bellard
4145 61382a50 bellard
#define MMUSUFFIX _cmmu
4146 61382a50 bellard
#define GETPC() NULL
4147 61382a50 bellard
#define env cpu_single_env
4148 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
4149 61382a50 bellard
4150 61382a50 bellard
#define SHIFT 0
4151 61382a50 bellard
#include "softmmu_template.h"
4152 61382a50 bellard
4153 61382a50 bellard
#define SHIFT 1
4154 61382a50 bellard
#include "softmmu_template.h"
4155 61382a50 bellard
4156 61382a50 bellard
#define SHIFT 2
4157 61382a50 bellard
#include "softmmu_template.h"
4158 61382a50 bellard
4159 61382a50 bellard
#define SHIFT 3
4160 61382a50 bellard
#include "softmmu_template.h"
4161 61382a50 bellard
4162 61382a50 bellard
#undef env
4163 61382a50 bellard
4164 61382a50 bellard
#endif