Statistics
| Branch: | Revision:

root / exec.c @ 1884533c

History | View | Annotate | Download (119.1 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
#include <stdlib.h>
27 54936004 bellard
#include <stdio.h>
28 54936004 bellard
#include <stdarg.h>
29 54936004 bellard
#include <string.h>
30 54936004 bellard
#include <errno.h>
31 54936004 bellard
#include <unistd.h>
32 54936004 bellard
#include <inttypes.h>
33 54936004 bellard
34 6180a181 bellard
#include "cpu.h"
35 6180a181 bellard
#include "exec-all.h"
36 ca10f867 aurel32
#include "qemu-common.h"
37 b67d9a52 bellard
#include "tcg.h"
38 b3c7724c pbrook
#include "hw/hw.h"
39 74576198 aliguori
#include "osdep.h"
40 7ba1e619 aliguori
#include "kvm.h"
41 29e922b6 Blue Swirl
#include "qemu-timer.h"
42 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
43 53a5960a pbrook
#include <qemu.h>
44 fd052bf6 Riku Voipio
#include <signal.h>
45 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
46 f01576f1 Juergen Lock
#include <sys/param.h>
47 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
48 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
49 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
50 f01576f1 Juergen Lock
#include <sys/time.h>
51 f01576f1 Juergen Lock
#include <sys/proc.h>
52 f01576f1 Juergen Lock
#include <machine/profile.h>
53 f01576f1 Juergen Lock
#define _KERNEL
54 f01576f1 Juergen Lock
#include <sys/user.h>
55 f01576f1 Juergen Lock
#undef _KERNEL
56 f01576f1 Juergen Lock
#undef sigqueue
57 f01576f1 Juergen Lock
#include <libutil.h>
58 f01576f1 Juergen Lock
#endif
59 f01576f1 Juergen Lock
#endif
60 53a5960a pbrook
#endif
61 54936004 bellard
62 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
63 66e85a21 bellard
//#define DEBUG_FLUSH
64 9fa3e853 bellard
//#define DEBUG_TLB
65 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
66 fd6ce8f6 bellard
67 fd6ce8f6 bellard
/* make various TB consistency checks */
68 5fafdf24 ths
//#define DEBUG_TB_CHECK
69 5fafdf24 ths
//#define DEBUG_TLB_CHECK
70 fd6ce8f6 bellard
71 1196be37 ths
//#define DEBUG_IOPORT
72 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
73 1196be37 ths
74 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
75 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
76 99773bd4 pbrook
#undef DEBUG_TB_CHECK
77 99773bd4 pbrook
#endif
78 99773bd4 pbrook
79 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
80 9fa3e853 bellard
81 bdaf78e0 blueswir1
static TranslationBlock *tbs;
82 26a5f13b bellard
int code_gen_max_blocks;
83 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84 bdaf78e0 blueswir1
static int nb_tbs;
85 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
86 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87 fd6ce8f6 bellard
88 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
89 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
90 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
91 d03d860b blueswir1
 section close to code segment. */
92 d03d860b blueswir1
#define code_gen_section                                \
93 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
94 d03d860b blueswir1
    __attribute__((aligned (32)))
95 f8e2af11 Stefan Weil
#elif defined(_WIN32)
96 f8e2af11 Stefan Weil
/* Maximum alignment for Win32 is 16. */
97 f8e2af11 Stefan Weil
#define code_gen_section                                \
98 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
99 d03d860b blueswir1
#else
100 d03d860b blueswir1
#define code_gen_section                                \
101 d03d860b blueswir1
    __attribute__((aligned (32)))
102 d03d860b blueswir1
#endif
103 d03d860b blueswir1
104 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
105 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
106 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
107 26a5f13b bellard
/* threshold to flush the translated code buffer */
108 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
109 fd6ce8f6 bellard
uint8_t *code_gen_ptr;
110 fd6ce8f6 bellard
111 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
112 9fa3e853 bellard
int phys_ram_fd;
113 74576198 aliguori
static int in_migration;
114 94a6b54f pbrook
115 f471a17e Alex Williamson
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
116 e2eef170 pbrook
#endif
117 9fa3e853 bellard
118 6a00d601 bellard
CPUState *first_cpu;
119 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
120 6a00d601 bellard
   cpu_exec() */
121 5fafdf24 ths
CPUState *cpu_single_env;
122 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
123 bf20dc07 ths
   1 = Precise instruction counting.
124 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
125 2e70f6ef pbrook
int use_icount = 0;
126 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
127 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
128 2e70f6ef pbrook
int64_t qemu_icount;
129 6a00d601 bellard
130 54936004 bellard
/* Per-target-page bookkeeping for the translator (one entry per guest
   page, stored in the leaves of l1_map). */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* bitmap of code-containing portions of the page; presumably built
       once code_write_count exceeds SMC_BITMAP_USE_THRESHOLD (the use
       is not visible in this chunk — confirm in tb_invalidate code) */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* page protection flags, user-mode emulation only */
    unsigned long flags;
#endif
} PageDesc;
141 54936004 bellard
142 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
143 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
144 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
145 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
146 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
147 41c1b1c9 Paul Brook
#else
148 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
149 41c1b1c9 Paul Brook
#endif
150 bedb69ea j_mayer
#else
151 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
152 bedb69ea j_mayer
#endif
153 54936004 bellard
154 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
155 5cd2c5b6 Richard Henderson
#define L2_BITS 10
156 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
157 54936004 bellard
158 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
159 5cd2c5b6 Richard Henderson
#define P_L1_BITS_REM \
160 5cd2c5b6 Richard Henderson
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
161 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
162 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
163 5cd2c5b6 Richard Henderson
164 5cd2c5b6 Richard Henderson
/* Size of the L1 page table.  Avoid silly small sizes.  */
165 5cd2c5b6 Richard Henderson
#if P_L1_BITS_REM < 4
166 5cd2c5b6 Richard Henderson
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
167 5cd2c5b6 Richard Henderson
#else
168 5cd2c5b6 Richard Henderson
#define P_L1_BITS  P_L1_BITS_REM
169 5cd2c5b6 Richard Henderson
#endif
170 5cd2c5b6 Richard Henderson
171 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
172 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
173 5cd2c5b6 Richard Henderson
#else
174 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
175 5cd2c5b6 Richard Henderson
#endif
176 5cd2c5b6 Richard Henderson
177 5cd2c5b6 Richard Henderson
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
178 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
179 5cd2c5b6 Richard Henderson
180 5cd2c5b6 Richard Henderson
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
181 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
182 5cd2c5b6 Richard Henderson
183 83fb7adf bellard
unsigned long qemu_real_host_page_size;
184 83fb7adf bellard
unsigned long qemu_host_page_bits;
185 83fb7adf bellard
unsigned long qemu_host_page_size;
186 83fb7adf bellard
unsigned long qemu_host_page_mask;
187 54936004 bellard
188 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
189 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
190 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
191 54936004 bellard
192 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
193 41c1b1c9 Paul Brook
/* Per-physical-page bookkeeping (leaves of l1_phys_map; system mode only). */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* offset of the page within its memory region; defaults to the
       page's physical address (see phys_page_find_alloc) */
    ram_addr_t region_offset;
} PhysPageDesc;
198 41c1b1c9 Paul Brook
199 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
200 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
201 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
202 6d9a1304 Paul Brook
203 e2eef170 pbrook
static void io_mem_init(void);
204 e2eef170 pbrook
205 33417e70 bellard
/* io memory support */
206 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
207 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
208 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
209 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
210 6658ffb8 pbrook
static int io_mem_watch;
211 6658ffb8 pbrook
#endif
212 33417e70 bellard
213 34865134 bellard
/* log support */
214 1e8b27ca Juha Riihimรคki
#ifdef WIN32
215 1e8b27ca Juha Riihimรคki
static const char *logfilename = "qemu.log";
216 1e8b27ca Juha Riihimรคki
#else
217 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
218 1e8b27ca Juha Riihimรคki
#endif
219 34865134 bellard
FILE *logfile;
220 34865134 bellard
int loglevel;
221 e735b91c pbrook
static int log_append = 0;
222 34865134 bellard
223 e3db7226 bellard
/* statistics */
224 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
225 e3db7226 bellard
static int tlb_flush_count;
226 b3755a91 Paul Brook
#endif
227 e3db7226 bellard
static int tb_flush_count;
228 e3db7226 bellard
static int tb_phys_invalidate_count;
229 e3db7226 bellard
230 7cb69cae bellard
#ifdef _WIN32
/* Make [addr, addr + size) executable.  On Win32 the region is switched
   to PAGE_EXECUTE_READWRITE; the previous protection is required by the
   API but otherwise unused. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make every host page overlapping [addr, addr + size) readable,
   writable and executable. */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();

    /* Round the start down and the end up to page boundaries, since
       mprotect operates on whole pages. */
    unsigned long first = (unsigned long)addr & ~(page_size - 1);
    unsigned long last = ((unsigned long)addr + size + page_size - 1)
                         & ~(page_size - 1);

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
255 7cb69cae bellard
256 b346ff46 bellard
/* Initialize the qemu_real_host_page_size / qemu_host_page_* globals.
   On BSD user-mode emulation, additionally walk the host's existing
   mappings and mark them PAGE_RESERVED so guest code cannot map over
   them. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been set earlier (e.g. by a command
       line option handled elsewhere); only default it if still zero. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* Derive log2 and mask from the final page size. */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 700104: enumerate mappings via kinfo_getvmmap(). */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* End lies outside the guest address space: only
                           reserve to the top when the guest space fits in
                           the map, otherwise skip (nothing to protect). */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Older FreeBSD: parse the Linux-compat maps file instead. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
344 54936004 bellard
345 41c1b1c9 Paul Brook
/* Walk the multi-level l1_map and return the PageDesc for page 'index'.
   If 'alloc' is non-zero, missing intermediate tables and the leaf
   PageDesc array are allocated (zero-filled) on the way down; otherwise
   NULL is returned as soon as any level is absent. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        /* Select the slot for this page within the current level. */
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of L2_SIZE PageDescs. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
394 54936004 bellard
395 41c1b1c9 Paul Brook
/* Non-allocating lookup: return the PageDesc for 'index', or NULL when
   no descriptor has been created for that page yet. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    const int no_alloc = 0;

    return page_find_alloc(index, no_alloc);
}
399 fd6ce8f6 bellard
400 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
401 c227f099 Anthony Liguori
/* Walk the multi-level l1_phys_map and return the PhysPageDesc for
   physical page 'index'.  If 'alloc' is non-zero, missing intermediate
   tables and the leaf array are allocated on the way down; otherwise
   NULL is returned as soon as any level is absent.  Freshly allocated
   leaf entries are initialized as unassigned (IO_MEM_UNASSIGNED) with
   region_offset set to the page's physical address. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        /* Reuse the outer loop index: the level walk above has finished
           (i == 0 here).  A redundant inner 'int i;' used to shadow it
           (-Wshadow). */
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
440 92e873b9 bellard
441 c227f099 Anthony Liguori
/* Non-allocating lookup: return the PhysPageDesc for 'index', or NULL
   when the physical page has no descriptor yet. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    const int no_alloc = 0;

    return phys_page_find_alloc(index, no_alloc);
}
445 92e873b9 bellard
446 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
447 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
448 3a7d929e bellard
                                    target_ulong vaddr);
449 c8a706fe pbrook
#define mmap_lock() do { } while(0)
450 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
451 9fa3e853 bellard
#endif
452 fd6ce8f6 bellard
453 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
454 4369415f bellard
455 4369415f bellard
#if defined(CONFIG_USER_ONLY)
456 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
457 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
458 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
459 4369415f bellard
#endif
460 4369415f bellard
461 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
462 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
463 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
464 4369415f bellard
#endif
465 4369415f bellard
466 8fcd3692 blueswir1
/* Allocate the buffer that holds generated host code, make it
   executable, and size the TranslationBlock array to match.  'tb_size'
   is the requested buffer size in bytes; zero selects a default.  The
   per-host #ifdef branches place the buffer where the TCG backend can
   reach it with direct calls/branches. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User-mode build: use the BSS-resident static buffer and ignore
       tb_size entirely. */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__) 
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Keep the buffer in the low 4GB so 32-bit displacements work. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Fallback for other hosts: plain allocation + mprotect. */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Reserve room at the end of the buffer so code generation for one
       op cannot overrun it; the flush threshold is checked elsewhere
       (not visible in this chunk). */
    code_gen_buffer_max_size = code_gen_buffer_size - 
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
559 26a5f13b bellard
560 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    /* Allocate the translation buffer and the tbs array, then point the
       generation cursor at its base. */
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    /* Set up the qemu_host_page_* globals. */
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
578 26a5f13b bellard
579 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
580 9656f324 pbrook
581 e59fb374 Juan Quintela
static int cpu_common_post_load(void *opaque, int version_id)
582 e7f4eff7 Juan Quintela
{
583 e7f4eff7 Juan Quintela
    CPUState *env = opaque;
584 9656f324 pbrook
585 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
586 3098dba0 aurel32
       version_id is increased. */
587 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
588 9656f324 pbrook
    tlb_flush(env, 1);
589 9656f324 pbrook
590 9656f324 pbrook
    return 0;
591 9656f324 pbrook
}
592 e7f4eff7 Juan Quintela
593 e7f4eff7 Juan Quintela
/* Migration description for the target-independent part of a CPU:
   only 'halted' and 'interrupt_request' are transferred; everything
   else is rebuilt via cpu_common_post_load(). */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
605 9656f324 pbrook
#endif
606 9656f324 pbrook
607 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
608 950f1472 Glauber Costa
{
609 950f1472 Glauber Costa
    CPUState *env = first_cpu;
610 950f1472 Glauber Costa
611 950f1472 Glauber Costa
    while (env) {
612 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
613 950f1472 Glauber Costa
            break;
614 950f1472 Glauber Costa
        env = env->next_cpu;
615 950f1472 Glauber Costa
    }
616 950f1472 Glauber Costa
617 950f1472 Glauber Costa
    return env;
618 950f1472 Glauber Costa
}
619 950f1472 Glauber Costa
620 6a00d601 bellard
/* Register a newly created CPU: append it to the global CPU list,
   assign it the next free cpu_index, and hook up migration state. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* In user mode other threads may walk the CPU list concurrently. */
    cpu_list_lock();
#endif
    /* Walk to the tail of the list, counting entries to derive the
       index of the new CPU. */
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    /* Publish the CPU on the list only after it is fully initialized. */
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Register both the common state (vmstate) and the per-target
       cpu_save/cpu_load state for this CPU instance. */
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
649 fd6ce8f6 bellard
650 9fa3e853 bellard
/* Discard the self-modifying-code bitmap of a page and reset its
   write counter, so the bitmap will be rebuilt on demand. */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap != NULL) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
658 9fa3e853 bellard
659 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
660 5cd2c5b6 Richard Henderson
661 5cd2c5b6 Richard Henderson
static void page_flush_tb_1 (int level, void **lp)
662 fd6ce8f6 bellard
{
663 5cd2c5b6 Richard Henderson
    int i;
664 fd6ce8f6 bellard
665 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
666 5cd2c5b6 Richard Henderson
        return;
667 5cd2c5b6 Richard Henderson
    }
668 5cd2c5b6 Richard Henderson
    if (level == 0) {
669 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
670 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
671 5cd2c5b6 Richard Henderson
            pd[i].first_tb = NULL;
672 5cd2c5b6 Richard Henderson
            invalidate_page_bitmap(pd + i);
673 fd6ce8f6 bellard
        }
674 5cd2c5b6 Richard Henderson
    } else {
675 5cd2c5b6 Richard Henderson
        void **pp = *lp;
676 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
677 5cd2c5b6 Richard Henderson
            page_flush_tb_1 (level - 1, pp + i);
678 5cd2c5b6 Richard Henderson
        }
679 5cd2c5b6 Richard Henderson
    }
680 5cd2c5b6 Richard Henderson
}
681 5cd2c5b6 Richard Henderson
682 5cd2c5b6 Richard Henderson
static void page_flush_tb(void)
683 5cd2c5b6 Richard Henderson
{
684 5cd2c5b6 Richard Henderson
    int i;
685 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
686 5cd2c5b6 Richard Henderson
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
687 fd6ce8f6 bellard
    }
688 fd6ce8f6 bellard
}
689 fd6ce8f6 bellard
690 fd6ce8f6 bellard
/* flush all the translation blocks */
691 d4e8164f bellard
/* XXX: tb_flush is currently not thread safe */
692 6a00d601 bellard
void tb_flush(CPUState *env1)
693 fd6ce8f6 bellard
{
694 6a00d601 bellard
    CPUState *env;
695 0124311e bellard
#if defined(DEBUG_FLUSH)
696 ab3d1727 blueswir1
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
697 ab3d1727 blueswir1
           (unsigned long)(code_gen_ptr - code_gen_buffer),
698 ab3d1727 blueswir1
           nb_tbs, nb_tbs > 0 ?
699 ab3d1727 blueswir1
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
700 fd6ce8f6 bellard
#endif
701 26a5f13b bellard
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
702 a208e54a pbrook
        cpu_abort(env1, "Internal error: code buffer overflow\n");
703 a208e54a pbrook
704 fd6ce8f6 bellard
    nb_tbs = 0;
705 3b46e624 ths
706 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
707 6a00d601 bellard
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
708 6a00d601 bellard
    }
709 9fa3e853 bellard
710 8a8a608f bellard
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
711 fd6ce8f6 bellard
    page_flush_tb();
712 9fa3e853 bellard
713 fd6ce8f6 bellard
    code_gen_ptr = code_gen_buffer;
714 d4e8164f bellard
    /* XXX: flush processor icache at this point if cache flush is
715 d4e8164f bellard
       expensive */
716 e3db7226 bellard
    tb_flush_count++;
717 fd6ce8f6 bellard
}
718 fd6ce8f6 bellard
719 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
720 fd6ce8f6 bellard
721 bc98a7ef j_mayer
/* Debug helper: verify that no surviving TB still overlaps the
   invalidated page containing 'address'. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* Overlap test: TB range [pc, pc+size) intersects the
               page [address, address+TARGET_PAGE_SIZE). */
            if (address < tb->pc + tb->size &&
                tb->pc < address + TARGET_PAGE_SIZE) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
737 fd6ce8f6 bellard
738 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
739 fd6ce8f6 bellard
static void tb_page_check(void)
740 fd6ce8f6 bellard
{
741 fd6ce8f6 bellard
    TranslationBlock *tb;
742 fd6ce8f6 bellard
    int i, flags1, flags2;
743 3b46e624 ths
744 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
745 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
746 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
747 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
748 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
749 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
750 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
751 fd6ce8f6 bellard
            }
752 fd6ce8f6 bellard
        }
753 fd6ce8f6 bellard
    }
754 fd6ce8f6 bellard
}
755 fd6ce8f6 bellard
756 fd6ce8f6 bellard
#endif
757 fd6ce8f6 bellard
758 fd6ce8f6 bellard
/* invalidate one TB */
759 fd6ce8f6 bellard
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
760 fd6ce8f6 bellard
                             int next_offset)
761 fd6ce8f6 bellard
{
762 fd6ce8f6 bellard
    TranslationBlock *tb1;
763 fd6ce8f6 bellard
    for(;;) {
764 fd6ce8f6 bellard
        tb1 = *ptb;
765 fd6ce8f6 bellard
        if (tb1 == tb) {
766 fd6ce8f6 bellard
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
767 fd6ce8f6 bellard
            break;
768 fd6ce8f6 bellard
        }
769 fd6ce8f6 bellard
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
770 fd6ce8f6 bellard
    }
771 fd6ce8f6 bellard
}
772 fd6ce8f6 bellard
773 9fa3e853 bellard
/* Unlink 'tb' from a page's TB list.  The low two bits of every list
   pointer encode which of the TB's two page slots the link belongs to. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *cur;
    unsigned int slot;

    for (;;) {
        cur = *ptb;
        slot = (long)cur & 3;
        cur = (TranslationBlock *)((long)cur & ~3);
        if (cur == tb) {
            /* Found it: bypass this entry. */
            *ptb = cur->page_next[slot];
            return;
        }
        ptb = &cur->page_next[slot];
    }
}
789 9fa3e853 bellard
790 d4e8164f bellard
/* Remove 'tb' from the circular list of TBs that jump into the TB
   reached through its jump slot 'n'.  List pointers carry a tag in
   their low two bits: 0/1 select a jmp_next slot, 2 marks the
   jmp_first anchor of the destination TB. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: follow the anchor of the destination TB */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
817 d4e8164f bellard
818 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* Retarget the patched jump to tb->tc_ptr + tb_next_offset[n],
       i.e. back into this TB's own generated code. */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
824 d4e8164f bellard
825 41c1b1c9 Paul Brook
/* Remove 'tb' from every data structure so it can no longer be found
   or executed: the physical hash table, the per-page TB lists, each
   CPU's tb_jmp_cache, and the jump chains.  Pages whose address equals
   'page_addr' are skipped; the caller owns that page's list (it is
   passed as -1 when no page is to be skipped). */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* unpatch tb1's jump n1 and unlink it from our chain */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
880 9fa3e853 bellard
881 9fa3e853 bellard
/* Set bits [start, start+len) in the bitmap 'tab'.
   Bit i lives in byte i>>3 at bit position i&7. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head = start & 7;

    if ((start >> 3) == (end >> 3)) {
        /* All bits fall inside a single byte. */
        if (start < end) {
            *p |= (0xff << head) & ~(0xff << (end & 7));
        }
        return;
    }
    /* Partial leading byte. */
    *p++ |= 0xff << head;
    start = (start + 8) & ~7;
    /* Whole bytes in the middle. */
    while (start + 8 <= end) {
        *p++ = 0xff;
        start += 8;
    }
    /* Partial trailing byte, if any bits remain. */
    if (start < end) {
        *p |= ~(0xff << (end & 7));
    }
}
907 9fa3e853 bellard
908 9fa3e853 bellard
/* Build the bitmap marking which bytes of this page are covered by
   translated code; tb_invalidate_phys_page_fast() consults it to skip
   writes that do not touch code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per byte of the page */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low two bits of the link select which page slot this is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: starts at page offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
935 9fa3e853 bellard
936 2e70f6ef pbrook
/* Translate a new basic block at (pc, cs_base, flags) with the given
   cflags, emit its host code, and link it into the hash/page tables.
   If TB allocation fails, all TBs are flushed and translation retried. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the code pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills onto a second guest page */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
973 3b46e624 ths
974 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* read the link before the TB is invalidated */
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                /* locate the executing TB lazily, only once */
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* re-deliver any interrupt that arrived meanwhile */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1084 fd6ce8f6 bellard
1085 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for a guest write of 'len' bytes at physical address
   'start': consult the page's code bitmap and only fall back to the
   full invalidation when the written bytes actually contain code. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* test the 'len' bits starting at the write offset */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
        /* no bitmap yet: must assume the write hits code */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1111 9fa3e853 bellard
1112 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1113 41c1b1c9 Paul Brook
/* Invalidate every TB on the page containing 'addr' (user-mode
   self-modifying-code path).  'pc' is the address inside generated
   code where the write happened (0 if unknown) and 'puc' the signal
   context, both forwarded to cpu_restore_state/cpu_resume_from_signal
   — NOTE(review): presumably a host PC from the SIGSEGV handler;
   verify against callers outside this chunk. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    /* this page's list was skipped by tb_phys_invalidate(tb, addr) */
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
1171 9fa3e853 bellard
#endif
1172 fd6ce8f6 bellard
1173 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
/* Link jump slot 'n' of 'tb' into the TB list of the page at
   'page_addr', then write-protect the page so future guest writes
   trigger invalidation. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push onto the page list, tagging the link with the slot number */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages; protect them all */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1226 fd6ce8f6 bellard
1227 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1228 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1229 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1230 fd6ce8f6 bellard
{
1231 fd6ce8f6 bellard
    TranslationBlock *tb;
1232 fd6ce8f6 bellard
1233 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1234 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1235 d4e8164f bellard
        return NULL;
1236 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1237 fd6ce8f6 bellard
    tb->pc = pc;
1238 b448f2f3 bellard
    tb->cflags = 0;
1239 d4e8164f bellard
    return tb;
1240 d4e8164f bellard
}
1241 d4e8164f bellard
1242 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    int is_last = (nb_tbs > 0) && (tb == &tbs[nb_tbs - 1]);

    if (is_last) {
        nb_tbs--;
        code_gen_ptr = tb->tc_ptr;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list (one or two pages may back this TB) */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tagged self-pointer (|2) marks the end of the circular jump list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses; 0xffff means "no direct jump" */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1294 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1295 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1296 fd6ce8f6 bellard
{
1297 9fa3e853 bellard
    int m_min, m_max, m;
1298 9fa3e853 bellard
    unsigned long v;
1299 9fa3e853 bellard
    TranslationBlock *tb;
1300 a513fe19 bellard
1301 a513fe19 bellard
    if (nb_tbs <= 0)
1302 a513fe19 bellard
        return NULL;
1303 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1304 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1305 a513fe19 bellard
        return NULL;
1306 a513fe19 bellard
    /* binary search (cf Knuth) */
1307 a513fe19 bellard
    m_min = 0;
1308 a513fe19 bellard
    m_max = nb_tbs - 1;
1309 a513fe19 bellard
    while (m_min <= m_max) {
1310 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1311 a513fe19 bellard
        tb = &tbs[m];
1312 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1313 a513fe19 bellard
        if (v == tc_ptr)
1314 a513fe19 bellard
            return tb;
1315 a513fe19 bellard
        else if (tc_ptr < v) {
1316 a513fe19 bellard
            m_max = m - 1;
1317 a513fe19 bellard
        } else {
1318 a513fe19 bellard
            m_min = m + 1;
1319 a513fe19 bellard
        }
1320 5fafdf24 ths
    }
1321 a513fe19 bellard
    return &tbs[m_max];
1322 a513fe19 bellard
}
1323 7501267e bellard
1324 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Unchain direct jump slot n of TB: remove tb from the jmp_first list of
   the TB it jumps to, patch the generated code back to the slow path, and
   recursively unchain the target TB.  List pointers carry a 2-bit tag in
   their low bits: 0/1 = slot index, 2 = list head sentinel.  */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
/* Break both outgoing direct-jump chains of this TB.  */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    int n;

    for (n = 0; n < 2; n++) {
        tb_reset_jump_recursive2(tb, n);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
/* Invalidate the TB covering pc so it is retranslated with the
   breakpoint check.  In user mode guest addresses map directly.  */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* System-mode variant: translate pc to a ram address through the MMU
   before invalidating, since TBs are indexed by physical address.  */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* combine the page's ram offset with the in-page offset of pc */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
/* Watchpoints are not supported in user-mode emulation: removal is a
   no-op and insertion reports ENOSYS.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
1411 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1412 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1413 6658ffb8 pbrook
{
1414 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1415 c0ce998e aliguori
    CPUWatchpoint *wp;
1416 6658ffb8 pbrook
1417 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1418 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1419 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1420 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1421 b4051334 aliguori
        return -EINVAL;
1422 b4051334 aliguori
    }
1423 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1424 a1d1bb31 aliguori
1425 a1d1bb31 aliguori
    wp->vaddr = addr;
1426 b4051334 aliguori
    wp->len_mask = len_mask;
1427 a1d1bb31 aliguori
    wp->flags = flags;
1428 a1d1bb31 aliguori
1429 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1430 c0ce998e aliguori
    if (flags & BP_GDB)
1431 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1432 c0ce998e aliguori
    else
1433 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1434 6658ffb8 pbrook
1435 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1436 a1d1bb31 aliguori
1437 a1d1bb31 aliguori
    if (watchpoint)
1438 a1d1bb31 aliguori
        *watchpoint = wp;
1439 a1d1bb31 aliguori
    return 0;
1440 6658ffb8 pbrook
}
1441 6658ffb8 pbrook
1442 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1443 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1444 a1d1bb31 aliguori
                          int flags)
1445 6658ffb8 pbrook
{
1446 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1447 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1448 6658ffb8 pbrook
1449 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1450 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1451 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1452 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1453 6658ffb8 pbrook
            return 0;
1454 6658ffb8 pbrook
        }
1455 6658ffb8 pbrook
    }
1456 a1d1bb31 aliguori
    return -ENOENT;
1457 6658ffb8 pbrook
}
1458 6658ffb8 pbrook
1459 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1460 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1461 a1d1bb31 aliguori
{
1462 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1463 7d03f82f edgar_igl
1464 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1465 a1d1bb31 aliguori
1466 a1d1bb31 aliguori
    qemu_free(watchpoint);
1467 a1d1bb31 aliguori
}
1468 a1d1bb31 aliguori
1469 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1470 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1471 a1d1bb31 aliguori
{
1472 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1473 a1d1bb31 aliguori
1474 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1475 a1d1bb31 aliguori
        if (wp->flags & mask)
1476 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1477 c0ce998e aliguori
    }
1478 7d03f82f edgar_igl
}
1479 c527ee8f Paul Brook
#endif
1480 7d03f82f edgar_igl
1481 a1d1bb31 aliguori
/* Add a breakpoint.  */
1482 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1483 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1484 4c3a88a2 bellard
{
1485 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1486 c0ce998e aliguori
    CPUBreakpoint *bp;
1487 3b46e624 ths
1488 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1489 4c3a88a2 bellard
1490 a1d1bb31 aliguori
    bp->pc = pc;
1491 a1d1bb31 aliguori
    bp->flags = flags;
1492 a1d1bb31 aliguori
1493 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1494 c0ce998e aliguori
    if (flags & BP_GDB)
1495 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1496 c0ce998e aliguori
    else
1497 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1498 3b46e624 ths
1499 d720b93d bellard
    breakpoint_invalidate(env, pc);
1500 a1d1bb31 aliguori
1501 a1d1bb31 aliguori
    if (breakpoint)
1502 a1d1bb31 aliguori
        *breakpoint = bp;
1503 4c3a88a2 bellard
    return 0;
1504 4c3a88a2 bellard
#else
1505 a1d1bb31 aliguori
    return -ENOSYS;
1506 4c3a88a2 bellard
#endif
1507 4c3a88a2 bellard
}
1508 4c3a88a2 bellard
1509 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1510 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1511 a1d1bb31 aliguori
{
1512 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1513 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1514 a1d1bb31 aliguori
1515 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1516 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1517 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1518 a1d1bb31 aliguori
            return 0;
1519 a1d1bb31 aliguori
        }
1520 7d03f82f edgar_igl
    }
1521 a1d1bb31 aliguori
    return -ENOENT;
1522 a1d1bb31 aliguori
#else
1523 a1d1bb31 aliguori
    return -ENOSYS;
1524 7d03f82f edgar_igl
#endif
1525 7d03f82f edgar_igl
}
1526 7d03f82f edgar_igl
1527 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1528 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1529 4c3a88a2 bellard
{
1530 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1531 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1532 d720b93d bellard
1533 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1534 a1d1bb31 aliguori
1535 a1d1bb31 aliguori
    qemu_free(breakpoint);
1536 a1d1bb31 aliguori
#endif
1537 a1d1bb31 aliguori
}
1538 a1d1bb31 aliguori
1539 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1540 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1541 a1d1bb31 aliguori
{
1542 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1543 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1544 a1d1bb31 aliguori
1545 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1546 a1d1bb31 aliguori
        if (bp->flags & mask)
1547 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1548 c0ce998e aliguori
    }
1549 4c3a88a2 bellard
#endif
1550 4c3a88a2 bellard
}
1551 4c3a88a2 bellard
1552 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1553 c33a346e bellard
   CPU loop after each instruction */
1554 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1555 c33a346e bellard
{
1556 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1557 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1558 c33a346e bellard
        env->singlestep_enabled = enabled;
1559 e22a25c9 aliguori
        if (kvm_enabled())
1560 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1561 e22a25c9 aliguori
        else {
1562 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1563 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1564 e22a25c9 aliguori
            tb_flush(env);
1565 e22a25c9 aliguori
        }
1566 c33a346e bellard
    }
1567 c33a346e bellard
#endif
1568 c33a346e bellard
}
1569 c33a346e bellard
1570 34865134 bellard
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* open the log file lazily on the first request; append on reopen */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* subsequent reopens append rather than truncate */
        log_append = 1;
    }
    /* logging switched off entirely: close the file */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
void cpu_set_log_filename(const char *filename)
1599 34865134 bellard
{
1600 34865134 bellard
    logfilename = strdup(filename);
1601 e735b91c pbrook
    if (logfile) {
1602 e735b91c pbrook
        fclose(logfile);
1603 e735b91c pbrook
        logfile = NULL;
1604 e735b91c pbrook
    }
1605 e735b91c pbrook
    cpu_set_log(loglevel);
1606 34865134 bellard
}
1607 c33a346e bellard
1608 3098dba0 aurel32
/* Detach the TB the CPU is currently executing (if any) from all chained
   TBs so the execution loop regains control soon.  Serialised by a local
   spinlock because it can be called from signal/interrupt context.  */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    /* remember the previous pending set so newly-raised bits can be
       detected below */
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* force the instruction counter to expire so the CPU loop exits
           at the next boundary — presumably how icount mode notices the
           interrupt; confirm against cpu-exec */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* raising a *new* interrupt outside an I/O instruction breaks
           deterministic icount replay */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
/* Clear the given pending interrupt bits.  */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
/* Request the CPU loop to stop, and break any chained TBs so the
   request is noticed promptly.  */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
/* Table mapping "-d" log item names to their mask bits, with help text;
   terminated by a zero-mask entry.  */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
/* Registered clients to be notified of guest physical memory map
   changes (see cpu_register_phys_memory_client below).  */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1708 f6f3fbca Michael S. Tsirkin
                                  ram_addr_t size,
1709 f6f3fbca Michael S. Tsirkin
                                  ram_addr_t phys_offset)
1710 f6f3fbca Michael S. Tsirkin
{
1711 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1712 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1713 f6f3fbca Michael S. Tsirkin
        client->set_memory(client, start_addr, size, phys_offset);
1714 f6f3fbca Michael S. Tsirkin
    }
1715 f6f3fbca Michael S. Tsirkin
}
1716 f6f3fbca Michael S. Tsirkin
1717 f6f3fbca Michael S. Tsirkin
/* Ask every client to sync its dirty bitmap over [start, end); stop and
   propagate the first negative return code.  */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        int ret = client->sync_dirty_bitmap(client, start, end);

        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}
static int cpu_notify_migration_log(int enable)
1730 f6f3fbca Michael S. Tsirkin
{
1731 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1732 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1733 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1734 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1735 f6f3fbca Michael S. Tsirkin
            return r;
1736 f6f3fbca Michael S. Tsirkin
    }
1737 f6f3fbca Michael S. Tsirkin
    return 0;
1738 f6f3fbca Michael S. Tsirkin
}
1739 f6f3fbca Michael S. Tsirkin
1740 5cd2c5b6 Richard Henderson
/* Recursively walk one sub-tree of the physical page table, reporting
   each mapped page to CLIENT.  level == 0 means *lp points at a leaf
   array of PhysPageDesc; otherwise at an array of child pointers.  */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    /* empty sub-tree: nothing mapped below this slot */
    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                /* NOTE(review): region_offset is passed as the address
                   argument of set_memory — confirm this matches the
                   CPUPhysMemoryClient contract */
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
/* Replay the whole physical page table to CLIENT by walking every
   top-level slot.
   Fix: each iteration must visit its own slot (l1_phys_map + i); the
   previous code passed l1_phys_map + 1 on every iteration, leaving the
   loop index unused and replaying only a single slot.  */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}
/* Register a new memory client and immediately replay the current
   physical memory map to it.  */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
/* Stop delivering memory-map notifications to this client.  */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
/* Return nonzero iff s2 equals the first n characters of s1 exactly
   (i.e. s2 has length n and matches byte for byte).  */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
1793 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1794 f193c797 bellard
{
1795 c7cd6a37 blueswir1
    const CPULogItem *item;
1796 f193c797 bellard
    int mask;
1797 f193c797 bellard
    const char *p, *p1;
1798 f193c797 bellard
1799 f193c797 bellard
    p = str;
1800 f193c797 bellard
    mask = 0;
1801 f193c797 bellard
    for(;;) {
1802 f193c797 bellard
        p1 = strchr(p, ',');
1803 f193c797 bellard
        if (!p1)
1804 f193c797 bellard
            p1 = p + strlen(p);
1805 8e3a9fd2 bellard
        if(cmp1(p,p1-p,"all")) {
1806 8e3a9fd2 bellard
                for(item = cpu_log_items; item->mask != 0; item++) {
1807 8e3a9fd2 bellard
                        mask |= item->mask;
1808 8e3a9fd2 bellard
                }
1809 8e3a9fd2 bellard
        } else {
1810 f193c797 bellard
        for(item = cpu_log_items; item->mask != 0; item++) {
1811 f193c797 bellard
            if (cmp1(p, p1 - p, item->name))
1812 f193c797 bellard
                goto found;
1813 f193c797 bellard
        }
1814 f193c797 bellard
        return 0;
1815 8e3a9fd2 bellard
        }
1816 f193c797 bellard
    found:
1817 f193c797 bellard
        mask |= item->mask;
1818 f193c797 bellard
        if (*p1 != ',')
1819 f193c797 bellard
            break;
1820 f193c797 bellard
        p = p1 + 1;
1821 f193c797 bellard
    }
1822 f193c797 bellard
    return mask;
1823 f193c797 bellard
}
1824 ea041c0e bellard
1825 7501267e bellard
/* Print a fatal error message (printf-style) plus CPU state to stderr
   and the log, then abort().  Never returns.  */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* the va_list is consumed twice (stderr and log), so copy it */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT handler so abort() really kills
           the process even if the guest installed its own handler */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
/* Create a new CPU that is a copy of 'env'.  Chaining (next_cpu) and the
   cpu_index of the freshly initialized CPU are preserved, and all
   break/watchpoints of the source CPU are cloned into the copy. */
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    /* Bug fix: initialize the *new* CPU's lists (they currently hold stale
       head pointers copied by memcpy) rather than re-initializing the
       source CPU's lists, which destroyed the source's break/watchpoints
       and made the FOREACH loops below iterate over empty lists so nothing
       was ever cloned. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1898 c5be9f08 ths
1899 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1900 0124311e bellard
1901 5c751e99 edgar_igl
/* Discard tb_jmp_cache entries that could refer to a TB overlapping the
   flushed page.  A TB may straddle two pages, so the page preceding
   'addr' is purged as well. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    target_ulong pages[2];
    int k;

    pages[0] = addr - TARGET_PAGE_SIZE;
    pages[1] = addr;
    for (k = 0; k < 2; k++) {
        unsigned int idx = tb_jmp_cache_hash_page(pages[k]);
        memset(&env->tb_jmp_cache[idx], 0,
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    }
}
1915 5c751e99 edgar_igl
1916 08738984 Igor Kovalenko
/* Template for an invalid TLB entry: every address field and the addend
   are -1 so no lookup can ever match it. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1922 08738984 Igor Kovalenko
1923 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1924 ee8b7021 bellard
   implemented yet) */
1925 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1926 33417e70 bellard
{
1927 33417e70 bellard
    int i;
1928 0124311e bellard
1929 9fa3e853 bellard
#if defined(DEBUG_TLB)
1930 9fa3e853 bellard
    printf("tlb_flush:\n");
1931 9fa3e853 bellard
#endif
1932 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1933 0124311e bellard
       links while we are modifying them */
1934 0124311e bellard
    env->current_tb = NULL;
1935 0124311e bellard
1936 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1937 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1938 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1939 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1940 cfde4bd9 Isaku Yamahata
        }
1941 33417e70 bellard
    }
1942 9fa3e853 bellard
1943 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1944 9fa3e853 bellard
1945 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1946 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1947 e3db7226 bellard
    tlb_flush_count++;
1948 33417e70 bellard
}
1949 33417e70 bellard
1950 274da6b2 bellard
/* Invalidate one TLB entry if any of its read/write/code addresses
   matches the (page-aligned) address, ignoring the invalid bit. */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    const target_ulong cmp_mask = TARGET_PAGE_MASK | TLB_INVALID_MASK;

    if (addr == (tlb_entry->addr_read & cmp_mask) ||
        addr == (tlb_entry->addr_write & cmp_mask) ||
        addr == (tlb_entry->addr_code & cmp_mask)) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1961 61382a50 bellard
1962 2e12669a bellard
/* Flush all TLB entries (in every MMU mode) that map the page containing
   'addr', plus the related tb_jmp_cache entries. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int mmu_idx, i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tlb_flush_jmp_cache(env, addr);
}
1991 9fa3e853 bellard
1992 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG re-arms the write trap for this page. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2000 9fa3e853 bellard
2001 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* NOTE(review): 'env' and 'vaddr' are unused here; only the dirty
       flag for the physical page is set. */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2008 1ccde1cb bellard
2009 5fafdf24 ths
/* If 'tlb_entry' maps dirty-tracked RAM inside [start, start+length),
   set TLB_NOTDIRTY so the next write takes the slow path again. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
    if ((addr - start) < length) {
        tlb_entry->addr_write =
            (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
    }
}
2020 1ccde1cb bellard
2021 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
/* Clear the given dirty flags for [start, end) and re-arm the dirty
   write trap in every CPU's TLB so the bits get set again on the next
   write to the range. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2056 1ccde1cb bellard
2057 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2058 74576198 aliguori
{
2059 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2060 74576198 aliguori
    in_migration = enable;
2061 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2062 f6f3fbca Michael S. Tsirkin
    return ret;
2063 74576198 aliguori
}
2064 74576198 aliguori
2065 74576198 aliguori
/* Return non-zero while dirty-memory tracking (migration) is active. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2069 74576198 aliguori
2070 c227f099 Anthony Liguori
/* Ask all memory clients to synchronize their dirty bitmap for the
   physical range [start_addr, end_addr].  Returns the clients' status. */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    return cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
}
2078 2bec46dc aliguori
2079 3a7d929e bellard
/* Re-arm the dirty write trap on a RAM-backed TLB entry whose backing
   page is no longer marked dirty. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *host_ptr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    host_ptr = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
        + tlb_entry->addend);
    ram_addr = qemu_ram_addr_from_host(host_ptr);
    if (!cpu_physical_memory_is_dirty(ram_addr)) {
        tlb_entry->addr_write |= TLB_NOTDIRTY;
    }
}
2093 3a7d929e bellard
2094 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2095 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2096 3a7d929e bellard
{
2097 3a7d929e bellard
    int i;
2098 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2099 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2100 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2101 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2102 cfde4bd9 Isaku Yamahata
    }
2103 3a7d929e bellard
}
2104 3a7d929e bellard
2105 0f459d16 pbrook
/* Clear the not-dirty trap on one TLB entry, but only if it maps exactly
   the page 'vaddr'. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
2110 1ccde1cb bellard
2111 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2112 0f459d16 pbrook
   so that it is no longer dirty */
2113 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2114 1ccde1cb bellard
{
2115 1ccde1cb bellard
    int i;
2116 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2117 1ccde1cb bellard
2118 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2119 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2120 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2121 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2122 9fa3e853 bellard
}
2123 9fa3e853 bellard
2124 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    /* 'size' is assumed to be a power of two (mask of its low bits). */
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No large page tracked yet: record exactly this region. */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    /* Widen the mask until both the tracked address and 'vaddr' fall in
       the same aligned region. */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2146 d4c430a8 Paul Brook
2147 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;               /* physical offset + mem-type bits */
    unsigned int index;             /* TLB slot for this vaddr */
    target_ulong address;           /* value stored in addr_read/write */
    target_ulong code_address;      /* value stored in addr_code */
    unsigned long addend;           /* host-pointer addend for RAM */
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;       /* I/O index + offset for slow path */

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Remember the large-page region so tlb_flush_page can force a
           full flush when part of it is invalidated. */
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* Fill in the TLB slot; addend/iotlb are stored relative to vaddr so
       the fast path can add them directly to the guest address. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM: trap the first write so dirty bits get set. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2249 9fa3e853 bellard
2250 0124311e bellard
#else
2251 0124311e bellard
2252 ee8b7021 bellard
/* User-mode emulation has no softmmu TLB: flushing is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2255 0124311e bellard
2256 2e12669a bellard
/* User-mode emulation has no softmmu TLB: page flush is a no-op. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2259 0124311e bellard
2260 edf8e2af Mika Westerberg
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator for walk_memory_regions(): merges consecutive pages with
   identical protection into one [start, end) region before invoking
   the callback. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked once per region */
    void *priv;                 /* opaque pointer forwarded to fn */
    unsigned long start;        /* start of the open region, -1ul if none */
    int prot;                   /* protection flags of the open region */
};
2272 5cd2c5b6 Richard Henderson
2273 5cd2c5b6 Richard Henderson
/* Close the currently open region (if any) at address 'end' by invoking
   the callback, then open a new region with protection 'new_prot'
   (or none, if new_prot is 0).  Returns the callback's nonzero status
   on failure, 0 otherwise. */
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
2288 5cd2c5b6 Richard Henderson
2289 5cd2c5b6 Richard Henderson
/* Recursive helper for walk_memory_regions(): walk one level of the
   page-table radix tree rooted at *lp, covering guest addresses
   starting at 'base'.  Returns the first nonzero callback status. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Empty subtree: close any open region at 'base'. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        /* Leaf level: scan individual page descriptors, closing/opening
           a region whenever the protection changes. */
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        /* Interior level: recurse into each child table. */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2326 5cd2c5b6 Richard Henderson
2327 5cd2c5b6 Richard Henderson
/* Walk all mapped guest memory, invoking 'fn' once per maximal run of
   pages that share the same protection.  Returns the first nonzero
   callback status, or 0 on success. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data = {
        .fn = fn,
        .priv = priv,
        .start = -1ul,
        .prot = 0,
    };
    unsigned long i;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any. */
    return walk_memory_regions_end(&data, 0, 0);
}
2347 edf8e2af Mika Westerberg
2348 b480d9b7 Paul Brook
/* walk_memory_regions() callback: print one region line (start, end,
   size, rwx flags) to the FILE* passed in 'priv'. */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = priv;

    fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
2362 edf8e2af Mika Westerberg
2363 edf8e2af Mika Westerberg
/* dump memory mappings */
2364 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2365 edf8e2af Mika Westerberg
{
2366 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2367 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2368 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2369 33417e70 bellard
}
2370 33417e70 bellard
2371 53a5960a pbrook
/* Return the protection flags of the page containing 'address', or 0 if
   the page has no descriptor (unmapped). */
int page_get_flags(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);

    return p ? p->flags : 0;
}
2380 9fa3e853 bellard
2381 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Remember the original writability so later write-protection for
           translated code can be undone by page_unprotect(). */
        flags |= PAGE_WRITE_ORG;
    }

    /* The loop counts down 'len' rather than comparing addr < end so it
       behaves correctly even if 'end' wrapped to 0 after alignment. */
    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2418 33417e70 bellard
2419 3d97b40b ths
/* Verify that the guest range [start, start+len) is mapped with at least
   the access rights in 'flags'.  When PAGE_WRITE is requested, pages that
   were made read-only because they contain translated code are
   unprotected.  Returns 0 on success, -1 on failure. */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* Bug fix: previously the function returned 0 here, so only
               the FIRST page of the range was ever checked/unprotected
               when PAGE_WRITE was requested.  Keep iterating so every
               page in the range is validated. */
        }
    }
    return 0;
}
2468 3d97b40b ths
2469 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        /* Fault on a page we do not manage: not handled here.  */
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* One host page may cover several target pages; restore write
           access to the whole host page at once.  */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            /* Accumulate the union of flags so mprotect below grants
               the most permissive protection any covered page needs.  */
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2516 9fa3e853 bellard
2517 6a00d601 bellard
/* User-mode emulation has no softmmu TLB, so marking an entry dirty is
   a no-op; this stub only satisfies callers shared with the system
   emulation build.  */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2521 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2522 9fa3e853 bellard
2523 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2524 8da3ff18 pbrook
2525 c04b2b78 Paul Brook
/* Byte offset of 'addr' within its target page.  */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

/* A subpage splits one target page into per-byte I/O mappings: for
   every offset inside the page it records which io-memory handler to
   use and the region offset to pass to it.  */
typedef struct subpage_t {
    target_phys_addr_t base;                    /* physical base of the page */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];  /* io handler index per byte */
    ram_addr_t region_offset[TARGET_PAGE_SIZE]; /* offset handed to handler */
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
2537 db7b5426 blueswir1
/* Compute the intra-page range [start_addr2, end_addr2] that the
   registration [start_addr, start_addr + orig_size) occupies within
   the target page containing 'addr', and set need_subpage when that
   range does not cover the whole page (so a subpage split is needed).
   end_addr is unused here but kept for symmetry at the call sites.  */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2556 db7b5426 blueswir1
2557 8f2498f9 Michael S. Tsirkin
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    /* Tell registered memory clients about the new mapping before the
       page tables are modified.  */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* The page is already mapped: if the new registration only
               partially covers it, split it into a subpage.  */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page already split: reuse the existing subpage.  */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM and ROM pages advance the backing offset page by
                   page; pure MMIO keeps the same handler index.  */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2645 33417e70 bellard
2646 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2647 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2648 ba863458 bellard
{
2649 ba863458 bellard
    PhysPageDesc *p;
2650 ba863458 bellard
2651 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2652 ba863458 bellard
    if (!p)
2653 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2654 ba863458 bellard
    return p->phys_offset;
2655 ba863458 bellard
}
2656 ba863458 bellard
2657 c227f099 Anthony Liguori
/* Mark [addr, addr+size) as a coalesced MMIO region.  Only meaningful
   when KVM is active; a no-op otherwise.  */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_coalesce_mmio_region(addr, size);
}
2662 f65ed4c1 aliguori
2663 c227f099 Anthony Liguori
/* Undo qemu_register_coalesced_mmio for [addr, addr+size).  Only
   meaningful when KVM is active; a no-op otherwise.  */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_uncoalesce_mmio_region(addr, size);
}
2668 f65ed4c1 aliguori
2669 62a2744c Sheng Yang
/* Drain any MMIO writes KVM has buffered for coalesced regions.
   A no-op when KVM is not active.  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2674 62a2744c Sheng Yang
2675 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2676 c902760f Marcelo Tosatti
2677 c902760f Marcelo Tosatti
#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

/* Return the block size (i.e. the huge page size) of the filesystem
   backing 'path', or 0 on failure.  Emits a warning to stderr when
   the path is not on hugetlbfs.  */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int err;

    /* statfs may be interrupted by a signal; retry on EINTR.  */
    do {
        err = statfs(path, &fs);
    } while (err != 0 && errno == EINTR);

    if (err != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
2700 c902760f Marcelo Tosatti
2701 c902760f Marcelo Tosatti
/* Allocate 'memory' bytes of guest RAM backed by a (deleted) temporary
   file under 'path', intended for hugetlbfs mounts.  Returns the mapped
   area, or NULL on any failure so the caller can fall back to an
   anonymous allocation.
   NOTE(review): on success the file descriptor is never closed; the
   mapping keeps the pages alive, but the fd itself remains open for the
   process lifetime -- confirm this is intentional.  */
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* A backing file smaller than one huge page is useless.  */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the file vanishes from the directory but the
       open fd (and the mapping) keeps its storage alive.  */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages.  */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    return area;
}
2766 c902760f Marcelo Tosatti
#endif
2767 c902760f Marcelo Tosatti
2768 c227f099 Anthony Liguori
/* Allocate a new block of guest RAM of (page-aligned) 'size' bytes,
   register it in ram_list, extend and initialise the dirty bitmap for
   it, and return its ram offset.  Host memory comes from the -mem-path
   backing file when configured (falling back to qemu_vmalloc), else
   from qemu_vmalloc / a fixed mmap on s390 KVM.  */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host) {
            /* hugetlbfs backing failed: fall back to normal memory.  */
            new_block->host = qemu_vmalloc(size);
#ifdef MADV_MERGEABLE
            madvise(new_block->host, size, MADV_MERGEABLE);
#endif
        }
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                                PROT_EXEC|PROT_READ|PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        /* Allow KSM to merge identical guest pages.  */
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = ram_list.last_offset;
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the new range fully dirty.  */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
        (ram_list.last_offset + size) >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (ram_list.last_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    ram_list.last_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2818 e9a1ab19 bellard
2819 c227f099 Anthony Liguori
/* Free guest RAM previously allocated with qemu_ram_alloc.
   Not implemented yet: blocks currently live for the whole VM
   lifetime, so this is a no-op.  */
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
2823 e9a1ab19 bellard
2824 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* Unsigned wraparound makes this a single-comparison range
           check: offsets below block->offset wrap to huge values.  */
        if (addr - block->offset < block->length) {
            /* Move the block to the list head (MRU order) so repeated
               lookups of the same block hit on the first iteration.  */
            QLIST_REMOVE(block, next);
            QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            return block->host + (addr - block->offset);
        }
    }

    /* An offset outside every registered block is a caller bug.  */
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
2849 dc828ca1 pbrook
2850 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* NOTE(review): relies on the signed pointer difference
           converting to a huge unsigned value when host < block->host,
           making the comparison false -- confirm ram_addr_t is at
           least as wide as ptrdiff_t on all supported hosts.  */
        if (host - block->host < block->length) {
            return block->offset + (host - block->host);
        }
    }

    /* A host pointer outside every registered block is a caller bug.  */
    fprintf(stderr, "Bad ram pointer %p\n", ptr);
    abort();

    return 0;
}
2868 5579c7f3 pbrook
2869 c227f099 Anthony Liguori
/* Read handlers for unassigned physical memory: reads return 0.  On
   targets that trap such accesses (sparc/microblaze) the target hook
   do_unassigned_access is invoked first; its last argument is the
   access size in bytes (1/2/4).  */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

/* 16-bit variant of unassigned_mem_readb.  */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

/* 32-bit variant of unassigned_mem_readb.  */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
2901 33417e70 bellard
2902 c227f099 Anthony Liguori
/* Write handlers for unassigned physical memory: the value is simply
   discarded.  On targets that trap such accesses (sparc/microblaze)
   do_unassigned_access is invoked with its second argument set to 1
   (write) and the access size (1/2/4) as last argument.  */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

/* 16-bit variant of unassigned_mem_writeb.  */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

/* 32-bit variant of unassigned_mem_writeb.  */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
2931 33417e70 bellard
2932 d60efc6b Blue Swirl
/* Dispatch tables (indexed by access size: byte/word/long) for the
   unassigned-memory handlers above.  */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
2943 33417e70 bellard
2944 c227f099 Anthony Liguori
/* Write handlers installed on RAM pages whose dirty tracking is armed
   (e.g. pages containing translated code).  Each performs the store,
   invalidating any translated code on the page first, then updates the
   dirty bitmap and disarms the callback once the page holds no more
   translated code.  */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Page still contains translated code: invalidate it before
           the store, then re-read the flags it may have updated.  */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* 16-bit variant of notdirty_mem_writeb.  */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* 32-bit variant of notdirty_mem_writeb.  */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3003 9fa3e853 bellard
3004 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const error_mem_read[3] = {
3005 9fa3e853 bellard
    NULL, /* never used */
3006 9fa3e853 bellard
    NULL, /* never used */
3007 9fa3e853 bellard
    NULL, /* never used */
3008 9fa3e853 bellard
};
3009 9fa3e853 bellard
3010 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3011 1ccde1cb bellard
    notdirty_mem_writeb,
3012 1ccde1cb bellard
    notdirty_mem_writew,
3013 1ccde1cb bellard
    notdirty_mem_writel,
3014 1ccde1cb bellard
};
3015 1ccde1cb bellard
3016 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
3017 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
3018 0f459d16 pbrook
{
3019 0f459d16 pbrook
    CPUState *env = cpu_single_env;
3020 06d55cc1 aliguori
    target_ulong pc, cs_base;
3021 06d55cc1 aliguori
    TranslationBlock *tb;
3022 0f459d16 pbrook
    target_ulong vaddr;
3023 a1d1bb31 aliguori
    CPUWatchpoint *wp;
3024 06d55cc1 aliguori
    int cpu_flags;
3025 0f459d16 pbrook
3026 06d55cc1 aliguori
    if (env->watchpoint_hit) {
3027 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
3028 06d55cc1 aliguori
         * the debug interrupt so that is will trigger after the
3029 06d55cc1 aliguori
         * current instruction. */
3030 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3031 06d55cc1 aliguori
        return;
3032 06d55cc1 aliguori
    }
3033 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3034 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3035 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
3036 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3037 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
3038 6e140f28 aliguori
            if (!env->watchpoint_hit) {
3039 6e140f28 aliguori
                env->watchpoint_hit = wp;
3040 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
3041 6e140f28 aliguori
                if (!tb) {
3042 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
3043 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
3044 6e140f28 aliguori
                }
3045 6e140f28 aliguori
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3046 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
3047 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3048 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
3049 6e140f28 aliguori
                } else {
3050 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3051 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3052 6e140f28 aliguori
                }
3053 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
3054 06d55cc1 aliguori
            }
3055 6e140f28 aliguori
        } else {
3056 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
3057 0f459d16 pbrook
        }
3058 0f459d16 pbrook
    }
3059 0f459d16 pbrook
}
3060 0f459d16 pbrook
3061 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
3062 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
3063 6658ffb8 pbrook
   phys routines.  */
3064 c227f099 Anthony Liguori
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3065 6658ffb8 pbrook
{
3066 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3067 6658ffb8 pbrook
    return ldub_phys(addr);
3068 6658ffb8 pbrook
}
3069 6658ffb8 pbrook
3070 c227f099 Anthony Liguori
/* 16-bit watchpoint-checked read; falls through to lduw_phys(). */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3075 6658ffb8 pbrook
3076 c227f099 Anthony Liguori
/* 32-bit watchpoint-checked read; falls through to ldl_phys(). */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3081 6658ffb8 pbrook
3082 c227f099 Anthony Liguori
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3083 6658ffb8 pbrook
                             uint32_t val)
3084 6658ffb8 pbrook
{
3085 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3086 6658ffb8 pbrook
    stb_phys(addr, val);
3087 6658ffb8 pbrook
}
3088 6658ffb8 pbrook
3089 c227f099 Anthony Liguori
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3090 6658ffb8 pbrook
                             uint32_t val)
3091 6658ffb8 pbrook
{
3092 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3093 6658ffb8 pbrook
    stw_phys(addr, val);
3094 6658ffb8 pbrook
}
3095 6658ffb8 pbrook
3096 c227f099 Anthony Liguori
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3097 6658ffb8 pbrook
                             uint32_t val)
3098 6658ffb8 pbrook
{
3099 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3100 6658ffb8 pbrook
    stl_phys(addr, val);
3101 6658ffb8 pbrook
}
3102 6658ffb8 pbrook
3103 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const watch_mem_read[3] = {
3104 6658ffb8 pbrook
    watch_mem_readb,
3105 6658ffb8 pbrook
    watch_mem_readw,
3106 6658ffb8 pbrook
    watch_mem_readl,
3107 6658ffb8 pbrook
};
3108 6658ffb8 pbrook
3109 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3110 6658ffb8 pbrook
    watch_mem_writeb,
3111 6658ffb8 pbrook
    watch_mem_writew,
3112 6658ffb8 pbrook
    watch_mem_writel,
3113 6658ffb8 pbrook
};
3114 6658ffb8 pbrook
3115 f6405247 Richard Henderson
static inline uint32_t subpage_readlen (subpage_t *mmio,
3116 f6405247 Richard Henderson
                                        target_phys_addr_t addr,
3117 f6405247 Richard Henderson
                                        unsigned int len)
3118 db7b5426 blueswir1
{
3119 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3120 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3121 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3122 db7b5426 blueswir1
           mmio, len, addr, idx);
3123 db7b5426 blueswir1
#endif
3124 db7b5426 blueswir1
3125 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3126 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3127 f6405247 Richard Henderson
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3128 db7b5426 blueswir1
}
3129 db7b5426 blueswir1
3130 c227f099 Anthony Liguori
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3131 f6405247 Richard Henderson
                                     uint32_t value, unsigned int len)
3132 db7b5426 blueswir1
{
3133 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3134 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3135 f6405247 Richard Henderson
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3136 f6405247 Richard Henderson
           __func__, mmio, len, addr, idx, value);
3137 db7b5426 blueswir1
#endif
3138 f6405247 Richard Henderson
3139 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3140 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3141 f6405247 Richard Henderson
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3142 db7b5426 blueswir1
}
3143 db7b5426 blueswir1
3144 c227f099 Anthony Liguori
/* Byte-wide subpage read (width index 0). */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}
3148 db7b5426 blueswir1
3149 c227f099 Anthony Liguori
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3150 db7b5426 blueswir1
                            uint32_t value)
3151 db7b5426 blueswir1
{
3152 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
3153 db7b5426 blueswir1
}
3154 db7b5426 blueswir1
3155 c227f099 Anthony Liguori
/* Word-wide subpage read (width index 1). */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}
3159 db7b5426 blueswir1
3160 c227f099 Anthony Liguori
static void subpage_writew (void *opaque, target_phys_addr_t addr,
3161 db7b5426 blueswir1
                            uint32_t value)
3162 db7b5426 blueswir1
{
3163 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
3164 db7b5426 blueswir1
}
3165 db7b5426 blueswir1
3166 c227f099 Anthony Liguori
/* Long-wide subpage read (width index 2). */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}
3170 db7b5426 blueswir1
3171 f6405247 Richard Henderson
static void subpage_writel (void *opaque, target_phys_addr_t addr,
3172 f6405247 Richard Henderson
                            uint32_t value)
3173 db7b5426 blueswir1
{
3174 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
3175 db7b5426 blueswir1
}
3176 db7b5426 blueswir1
3177 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const subpage_read[] = {
3178 db7b5426 blueswir1
    &subpage_readb,
3179 db7b5426 blueswir1
    &subpage_readw,
3180 db7b5426 blueswir1
    &subpage_readl,
3181 db7b5426 blueswir1
};
3182 db7b5426 blueswir1
3183 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const subpage_write[] = {
3184 db7b5426 blueswir1
    &subpage_writeb,
3185 db7b5426 blueswir1
    &subpage_writew,
3186 db7b5426 blueswir1
    &subpage_writel,
3187 db7b5426 blueswir1
};
3188 db7b5426 blueswir1
3189 c227f099 Anthony Liguori
/* Register the io handler 'memory' for the byte range [start, end] of a
 * subpage container.  Both bounds are offsets within the page; returns
 * -1 if either falls outside the page, 0 on success. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Store only the io table index, not the full IO_MEM_* token. */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    while (idx <= eidx) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
        idx++;
    }

    return 0;
}
3210 db7b5426 blueswir1
3211 f6405247 Richard Henderson
/* Allocate and register a subpage container for the page at 'base'.
 * Writes the new IO_MEM_SUBPAGE-tagged token to *phys and initially maps
 * the whole page to 'orig_memory'/'region_offset'.  Returns the container
 * (ownership stays with the phys-map machinery). */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio = qemu_mallocz(sizeof(subpage_t));
    int subpage_memory;

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3231 db7b5426 blueswir1
3232 88715657 aliguori
static int get_free_io_mem_idx(void)
3233 88715657 aliguori
{
3234 88715657 aliguori
    int i;
3235 88715657 aliguori
3236 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3237 88715657 aliguori
        if (!io_mem_used[i]) {
3238 88715657 aliguori
            io_mem_used[i] = 1;
3239 88715657 aliguori
            return i;
3240 88715657 aliguori
        }
3241 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3242 88715657 aliguori
    return -1;
3243 88715657 aliguori
}
3244 88715657 aliguori
3245 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3246 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3247 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3248 3ee89922 blueswir1
   If io_index is non zero, the corresponding io zone is
3249 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3250 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3251 4254fab8 blueswir1
   returned if error. */
3252 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
3253 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
3254 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
3255 1eed09cb Avi Kivity
                                        void *opaque)
3256 33417e70 bellard
{
3257 3cab721d Richard Henderson
    int i;
3258 3cab721d Richard Henderson
3259 33417e70 bellard
    if (io_index <= 0) {
3260 88715657 aliguori
        io_index = get_free_io_mem_idx();
3261 88715657 aliguori
        if (io_index == -1)
3262 88715657 aliguori
            return io_index;
3263 33417e70 bellard
    } else {
3264 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
3265 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3266 33417e70 bellard
            return -1;
3267 33417e70 bellard
    }
3268 b5ff1b31 bellard
3269 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3270 3cab721d Richard Henderson
        io_mem_read[io_index][i]
3271 3cab721d Richard Henderson
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3272 3cab721d Richard Henderson
    }
3273 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3274 3cab721d Richard Henderson
        io_mem_write[io_index][i]
3275 3cab721d Richard Henderson
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3276 3cab721d Richard Henderson
    }
3277 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3278 f6405247 Richard Henderson
3279 f6405247 Richard Henderson
    return (io_index << IO_MEM_SHIFT);
3280 33417e70 bellard
}
3281 61382a50 bellard
3282 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3283 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3284 1eed09cb Avi Kivity
                           void *opaque)
3285 1eed09cb Avi Kivity
{
3286 1eed09cb Avi Kivity
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3287 1eed09cb Avi Kivity
}
3288 1eed09cb Avi Kivity
3289 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3290 88715657 aliguori
{
3291 88715657 aliguori
    int i;
3292 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3293 88715657 aliguori
3294 88715657 aliguori
    for (i=0;i < 3; i++) {
3295 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3296 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3297 88715657 aliguori
    }
3298 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3299 88715657 aliguori
    io_mem_used[io_index] = 0;
3300 88715657 aliguori
}
3301 88715657 aliguori
3302 e9179ce1 Avi Kivity
static void io_mem_init(void)
3303 e9179ce1 Avi Kivity
{
3304 e9179ce1 Avi Kivity
    int i;
3305 e9179ce1 Avi Kivity
3306 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3307 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3308 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3309 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3310 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3311 e9179ce1 Avi Kivity
3312 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3313 e9179ce1 Avi Kivity
                                          watch_mem_write, NULL);
3314 e9179ce1 Avi Kivity
}
3315 e9179ce1 Avi Kivity
3316 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3317 e2eef170 pbrook
3318 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3319 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3320 a68fe89c Paul Brook
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3321 a68fe89c Paul Brook
                        uint8_t *buf, int len, int is_write)
3322 13eb76e0 bellard
{
3323 13eb76e0 bellard
    int l, flags;
3324 13eb76e0 bellard
    target_ulong page;
3325 53a5960a pbrook
    void * p;
3326 13eb76e0 bellard
3327 13eb76e0 bellard
    while (len > 0) {
3328 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3329 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3330 13eb76e0 bellard
        if (l > len)
3331 13eb76e0 bellard
            l = len;
3332 13eb76e0 bellard
        flags = page_get_flags(page);
3333 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
3334 a68fe89c Paul Brook
            return -1;
3335 13eb76e0 bellard
        if (is_write) {
3336 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3337 a68fe89c Paul Brook
                return -1;
3338 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3339 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3340 a68fe89c Paul Brook
                return -1;
3341 72fb7daa aurel32
            memcpy(p, buf, l);
3342 72fb7daa aurel32
            unlock_user(p, addr, l);
3343 13eb76e0 bellard
        } else {
3344 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3345 a68fe89c Paul Brook
                return -1;
3346 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3347 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3348 a68fe89c Paul Brook
                return -1;
3349 72fb7daa aurel32
            memcpy(buf, p, l);
3350 5b257578 aurel32
            unlock_user(p, addr, 0);
3351 13eb76e0 bellard
        }
3352 13eb76e0 bellard
        len -= l;
3353 13eb76e0 bellard
        buf += l;
3354 13eb76e0 bellard
        addr += l;
3355 13eb76e0 bellard
    }
3356 a68fe89c Paul Brook
    return 0;
3357 13eb76e0 bellard
}
3358 8df1cd07 bellard
3359 13eb76e0 bellard
#else
3360 c227f099 Anthony Liguori
/* Copy 'len' bytes between 'buf' and guest physical memory, one page at
 * a time.  RAM pages go through memcpy (with code invalidation and dirty
 * tracking on writes); MMIO pages are dispatched to their registered io
 * handlers at the widest naturally aligned access size (4/2/1 bytes). */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Limit each chunk to the end of the current page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write: dispatch to the registered handler. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p) {
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                }
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                /* RAM write: plain memcpy plus dirty/code bookkeeping. */
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O read: dispatch to the registered handler. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p) {
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                }
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM (or ROMD) read: plain memcpy. */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3456 8df1cd07 bellard
3457 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
3458 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3459 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3460 d0ecd2aa bellard
{
3461 d0ecd2aa bellard
    int l;
3462 d0ecd2aa bellard
    uint8_t *ptr;
3463 c227f099 Anthony Liguori
    target_phys_addr_t page;
3464 d0ecd2aa bellard
    unsigned long pd;
3465 d0ecd2aa bellard
    PhysPageDesc *p;
3466 3b46e624 ths
3467 d0ecd2aa bellard
    while (len > 0) {
3468 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3469 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3470 d0ecd2aa bellard
        if (l > len)
3471 d0ecd2aa bellard
            l = len;
3472 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3473 d0ecd2aa bellard
        if (!p) {
3474 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3475 d0ecd2aa bellard
        } else {
3476 d0ecd2aa bellard
            pd = p->phys_offset;
3477 d0ecd2aa bellard
        }
3478 3b46e624 ths
3479 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3480 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3481 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3482 d0ecd2aa bellard
            /* do nothing */
3483 d0ecd2aa bellard
        } else {
3484 d0ecd2aa bellard
            unsigned long addr1;
3485 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3486 d0ecd2aa bellard
            /* ROM/RAM case */
3487 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3488 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3489 d0ecd2aa bellard
        }
3490 d0ecd2aa bellard
        len -= l;
3491 d0ecd2aa bellard
        buf += l;
3492 d0ecd2aa bellard
        addr += l;
3493 d0ecd2aa bellard
    }
3494 d0ecd2aa bellard
}
3495 d0ecd2aa bellard
3496 6d16c2f8 aliguori
typedef struct {
3497 6d16c2f8 aliguori
    void *buffer;
3498 c227f099 Anthony Liguori
    target_phys_addr_t addr;
3499 c227f099 Anthony Liguori
    target_phys_addr_t len;
3500 6d16c2f8 aliguori
} BounceBuffer;
3501 6d16c2f8 aliguori
3502 6d16c2f8 aliguori
static BounceBuffer bounce;
3503 6d16c2f8 aliguori
3504 ba223c29 aliguori
typedef struct MapClient {
3505 ba223c29 aliguori
    void *opaque;
3506 ba223c29 aliguori
    void (*callback)(void *opaque);
3507 72cf2d4f Blue Swirl
    QLIST_ENTRY(MapClient) link;
3508 ba223c29 aliguori
} MapClient;
3509 ba223c29 aliguori
3510 72cf2d4f Blue Swirl
static QLIST_HEAD(map_client_list, MapClient) map_client_list
3511 72cf2d4f Blue Swirl
    = QLIST_HEAD_INITIALIZER(map_client_list);
3512 ba223c29 aliguori
3513 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3514 ba223c29 aliguori
{
3515 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3516 ba223c29 aliguori
3517 ba223c29 aliguori
    client->opaque = opaque;
3518 ba223c29 aliguori
    client->callback = callback;
3519 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3520 ba223c29 aliguori
    return client;
3521 ba223c29 aliguori
}
3522 ba223c29 aliguori
3523 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3524 ba223c29 aliguori
{
3525 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3526 ba223c29 aliguori
3527 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3528 34d5e948 Isaku Yamahata
    qemu_free(client);
3529 ba223c29 aliguori
}
3530 ba223c29 aliguori
3531 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3532 ba223c29 aliguori
{
3533 ba223c29 aliguori
    MapClient *client;
3534 ba223c29 aliguori
3535 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3536 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3537 ba223c29 aliguori
        client->callback(client->opaque);
3538 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3539 ba223c29 aliguori
    }
3540 ba223c29 aliguori
}
3541 ba223c29 aliguori
3542 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;           /* host address of the start of the mapping */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    /* Walk the range page by page, extending the mapping while the
       host addresses stay contiguous. */
    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;  /* bytes left in this page */
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not plain RAM: fall back to the single global bounce buffer.
               It can only cover one page, must start the mapping (done == 0)
               and must not already be in use. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* For reads, pre-fill the bounce buffer from guest memory. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            /* RAM: map directly to the host pointer for this page. */
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host addresses are no longer contiguous: stop here and
               return the shorter mapping via *plen. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but written pages
           need dirty tracking and code invalidation. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush written data back to guest memory,
       then release the buffer and wake up any waiting map clients. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
/* warning: addr must be aligned */
3641 c227f099 Anthony Liguori
uint32_t ldl_phys(target_phys_addr_t addr)
3642 8df1cd07 bellard
{
3643 8df1cd07 bellard
    int io_index;
3644 8df1cd07 bellard
    uint8_t *ptr;
3645 8df1cd07 bellard
    uint32_t val;
3646 8df1cd07 bellard
    unsigned long pd;
3647 8df1cd07 bellard
    PhysPageDesc *p;
3648 8df1cd07 bellard
3649 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3650 8df1cd07 bellard
    if (!p) {
3651 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3652 8df1cd07 bellard
    } else {
3653 8df1cd07 bellard
        pd = p->phys_offset;
3654 8df1cd07 bellard
    }
3655 3b46e624 ths
3656 5fafdf24 ths
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3657 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3658 8df1cd07 bellard
        /* I/O case */
3659 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3660 8da3ff18 pbrook
        if (p)
3661 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3662 8df1cd07 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3663 8df1cd07 bellard
    } else {
3664 8df1cd07 bellard
        /* RAM case */
3665 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3666 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
3667 8df1cd07 bellard
        val = ldl_p(ptr);
3668 8df1cd07 bellard
    }
3669 8df1cd07 bellard
    return val;
3670 8df1cd07 bellard
}
3671 8df1cd07 bellard
3672 84b7b8e7 bellard
/* Load a 64-bit value from a physical address.
 * warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: there is no 64-bit handler slot, so issue two
           32-bit reads and combine them in guest byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case: read straight from host memory */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
3711 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
3712 aab33094 bellard
{
3713 aab33094 bellard
    uint8_t val;
3714 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3715 aab33094 bellard
    return val;
3716 aab33094 bellard
}
3717 aab33094 bellard
3718 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
3719 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
3720 aab33094 bellard
{
3721 733f0b02 Michael S. Tsirkin
    int io_index;
3722 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
3723 733f0b02 Michael S. Tsirkin
    uint64_t val;
3724 733f0b02 Michael S. Tsirkin
    unsigned long pd;
3725 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
3726 733f0b02 Michael S. Tsirkin
3727 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3728 733f0b02 Michael S. Tsirkin
    if (!p) {
3729 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
3730 733f0b02 Michael S. Tsirkin
    } else {
3731 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
3732 733f0b02 Michael S. Tsirkin
    }
3733 733f0b02 Michael S. Tsirkin
3734 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3735 733f0b02 Michael S. Tsirkin
        !(pd & IO_MEM_ROMD)) {
3736 733f0b02 Michael S. Tsirkin
        /* I/O case */
3737 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3738 733f0b02 Michael S. Tsirkin
        if (p)
3739 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3740 733f0b02 Michael S. Tsirkin
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3741 733f0b02 Michael S. Tsirkin
    } else {
3742 733f0b02 Michael S. Tsirkin
        /* RAM case */
3743 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3744 733f0b02 Michael S. Tsirkin
            (addr & ~TARGET_PAGE_MASK);
3745 733f0b02 Michael S. Tsirkin
        val = lduw_p(ptr);
3746 733f0b02 Michael S. Tsirkin
    }
3747 733f0b02 Michael S. Tsirkin
    return val;
3748 aab33094 bellard
}
3749 aab33094 bellard
3750 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* not RAM: go through the registered 32-bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration the dirty log must still see this write,
           even though the normal dirty bits are deliberately skipped. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
/* Store a 64-bit value without dirty-bit bookkeeping (see
   stl_phys_notdirty).  warning: addr must be aligned. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* not RAM: no 64-bit handler slot, so issue two 32-bit writes
           in guest byte order */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: store straight to host memory */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
3822 c227f099 Anthony Liguori
void stl_phys(target_phys_addr_t addr, uint32_t val)
3823 8df1cd07 bellard
{
3824 8df1cd07 bellard
    int io_index;
3825 8df1cd07 bellard
    uint8_t *ptr;
3826 8df1cd07 bellard
    unsigned long pd;
3827 8df1cd07 bellard
    PhysPageDesc *p;
3828 8df1cd07 bellard
3829 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3830 8df1cd07 bellard
    if (!p) {
3831 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3832 8df1cd07 bellard
    } else {
3833 8df1cd07 bellard
        pd = p->phys_offset;
3834 8df1cd07 bellard
    }
3835 3b46e624 ths
3836 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3837 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3838 8da3ff18 pbrook
        if (p)
3839 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3840 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3841 8df1cd07 bellard
    } else {
3842 8df1cd07 bellard
        unsigned long addr1;
3843 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3844 8df1cd07 bellard
        /* RAM case */
3845 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3846 8df1cd07 bellard
        stl_p(ptr, val);
3847 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
3848 3a7d929e bellard
            /* invalidate code */
3849 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3850 3a7d929e bellard
            /* set dirty bit */
3851 f7c11b53 Yoshiaki Tamura
            cpu_physical_memory_set_dirty_flags(addr1,
3852 f7c11b53 Yoshiaki Tamura
                (0xff & ~CODE_DIRTY_FLAG));
3853 3a7d929e bellard
        }
3854 8df1cd07 bellard
    }
3855 8df1cd07 bellard
}
3856 8df1cd07 bellard
3857 aab33094 bellard
/* XXX: optimize */
3858 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
3859 aab33094 bellard
{
3860 aab33094 bellard
    uint8_t v = val;
3861 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3862 aab33094 bellard
}
3863 aab33094 bellard
3864 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
3865 c227f099 Anthony Liguori
void stw_phys(target_phys_addr_t addr, uint32_t val)
3866 aab33094 bellard
{
3867 733f0b02 Michael S. Tsirkin
    int io_index;
3868 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
3869 733f0b02 Michael S. Tsirkin
    unsigned long pd;
3870 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
3871 733f0b02 Michael S. Tsirkin
3872 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3873 733f0b02 Michael S. Tsirkin
    if (!p) {
3874 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
3875 733f0b02 Michael S. Tsirkin
    } else {
3876 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
3877 733f0b02 Michael S. Tsirkin
    }
3878 733f0b02 Michael S. Tsirkin
3879 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3880 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3881 733f0b02 Michael S. Tsirkin
        if (p)
3882 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3883 733f0b02 Michael S. Tsirkin
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3884 733f0b02 Michael S. Tsirkin
    } else {
3885 733f0b02 Michael S. Tsirkin
        unsigned long addr1;
3886 733f0b02 Michael S. Tsirkin
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3887 733f0b02 Michael S. Tsirkin
        /* RAM case */
3888 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(addr1);
3889 733f0b02 Michael S. Tsirkin
        stw_p(ptr, val);
3890 733f0b02 Michael S. Tsirkin
        if (!cpu_physical_memory_is_dirty(addr1)) {
3891 733f0b02 Michael S. Tsirkin
            /* invalidate code */
3892 733f0b02 Michael S. Tsirkin
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3893 733f0b02 Michael S. Tsirkin
            /* set dirty bit */
3894 733f0b02 Michael S. Tsirkin
            cpu_physical_memory_set_dirty_flags(addr1,
3895 733f0b02 Michael S. Tsirkin
                (0xff & ~CODE_DIRTY_FLAG));
3896 733f0b02 Michael S. Tsirkin
        }
3897 733f0b02 Michael S. Tsirkin
    }
3898 aab33094 bellard
}
3899 aab33094 bellard
3900 aab33094 bellard
/* XXX: optimize */
3901 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
3902 aab33094 bellard
{
3903 aab33094 bellard
    val = tswap64(val);
3904 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3905 aab33094 bellard
}
3906 aab33094 bellard
3907 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
3908 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3909 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
3910 13eb76e0 bellard
{
3911 13eb76e0 bellard
    int l;
3912 c227f099 Anthony Liguori
    target_phys_addr_t phys_addr;
3913 9b3c35e0 j_mayer
    target_ulong page;
3914 13eb76e0 bellard
3915 13eb76e0 bellard
    while (len > 0) {
3916 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3917 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
3918 13eb76e0 bellard
        /* if no physical page mapped, return an error */
3919 13eb76e0 bellard
        if (phys_addr == -1)
3920 13eb76e0 bellard
            return -1;
3921 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3922 13eb76e0 bellard
        if (l > len)
3923 13eb76e0 bellard
            l = len;
3924 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
3925 5e2972fd aliguori
        if (is_write)
3926 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
3927 5e2972fd aliguori
        else
3928 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3929 13eb76e0 bellard
        len -= l;
3930 13eb76e0 bellard
        buf += l;
3931 13eb76e0 bellard
        addr += l;
3932 13eb76e0 bellard
    }
3933 13eb76e0 bellard
    return 0;
3934 13eb76e0 bellard
}
3935 a68fe89c Paul Brook
#endif
3936 13eb76e0 bellard
3937 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  retaddr);
    }
    /* icount budget at TB entry = remaining budget + this TB's count */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Retranslate with an instruction cap so the I/O insn is last. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)
3997 b3755a91 Paul Brook
3998 e3db7226 bellard
/* Print translation-buffer and TB statistics via cpu_fprintf
   (TB sizes, cross-page TBs, direct-jump chaining, flush counts). */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* Aggregate over all currently translated blocks. */
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n", 
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
4052 61382a50 bellard
#define GETPC() NULL
4053 61382a50 bellard
#define env cpu_single_env
4054 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
4055 61382a50 bellard
4056 61382a50 bellard
#define SHIFT 0
4057 61382a50 bellard
#include "softmmu_template.h"
4058 61382a50 bellard
4059 61382a50 bellard
#define SHIFT 1
4060 61382a50 bellard
#include "softmmu_template.h"
4061 61382a50 bellard
4062 61382a50 bellard
#define SHIFT 2
4063 61382a50 bellard
#include "softmmu_template.h"
4064 61382a50 bellard
4065 61382a50 bellard
#define SHIFT 3
4066 61382a50 bellard
#include "softmmu_template.h"
4067 61382a50 bellard
4068 61382a50 bellard
#undef env
4069 61382a50 bellard
4070 61382a50 bellard
#endif