Statistics
| Branch: | Revision:

root / exec.c @ cd19cfa2

History | View | Annotate | Download (129.3 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
27 055403b2 Stefan Weil
#include "qemu-common.h"
28 6180a181 bellard
#include "cpu.h"
29 6180a181 bellard
#include "exec-all.h"
30 b67d9a52 bellard
#include "tcg.h"
31 b3c7724c pbrook
#include "hw/hw.h"
32 cc9e98cb Alex Williamson
#include "hw/qdev.h"
33 74576198 aliguori
#include "osdep.h"
34 7ba1e619 aliguori
#include "kvm.h"
35 29e922b6 Blue Swirl
#include "qemu-timer.h"
36 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
37 53a5960a pbrook
#include <qemu.h>
38 fd052bf6 Riku Voipio
#include <signal.h>
39 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40 f01576f1 Juergen Lock
#include <sys/param.h>
41 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
42 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
43 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
44 f01576f1 Juergen Lock
#include <sys/time.h>
45 f01576f1 Juergen Lock
#include <sys/proc.h>
46 f01576f1 Juergen Lock
#include <machine/profile.h>
47 f01576f1 Juergen Lock
#define _KERNEL
48 f01576f1 Juergen Lock
#include <sys/user.h>
49 f01576f1 Juergen Lock
#undef _KERNEL
50 f01576f1 Juergen Lock
#undef sigqueue
51 f01576f1 Juergen Lock
#include <libutil.h>
52 f01576f1 Juergen Lock
#endif
53 f01576f1 Juergen Lock
#endif
54 53a5960a pbrook
#endif
55 54936004 bellard
56 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
57 66e85a21 bellard
//#define DEBUG_FLUSH
58 9fa3e853 bellard
//#define DEBUG_TLB
59 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
60 fd6ce8f6 bellard
61 fd6ce8f6 bellard
/* make various TB consistency checks */
62 5fafdf24 ths
//#define DEBUG_TB_CHECK
63 5fafdf24 ths
//#define DEBUG_TLB_CHECK
64 fd6ce8f6 bellard
65 1196be37 ths
//#define DEBUG_IOPORT
66 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
67 1196be37 ths
68 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
69 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
70 99773bd4 pbrook
#undef DEBUG_TB_CHECK
71 99773bd4 pbrook
#endif
72 99773bd4 pbrook
73 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
74 9fa3e853 bellard
75 bdaf78e0 blueswir1
static TranslationBlock *tbs;
76 24ab68ac Stefan Weil
static int code_gen_max_blocks;
77 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
78 bdaf78e0 blueswir1
static int nb_tbs;
79 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
80 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
81 fd6ce8f6 bellard
82 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
83 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
84 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
85 d03d860b blueswir1
 section close to code segment. */
86 d03d860b blueswir1
#define code_gen_section                                \
87 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
88 d03d860b blueswir1
    __attribute__((aligned (32)))
89 f8e2af11 Stefan Weil
#elif defined(_WIN32)
90 f8e2af11 Stefan Weil
/* Maximum alignment for Win32 is 16. */
91 f8e2af11 Stefan Weil
#define code_gen_section                                \
92 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
93 d03d860b blueswir1
#else
94 d03d860b blueswir1
#define code_gen_section                                \
95 d03d860b blueswir1
    __attribute__((aligned (32)))
96 d03d860b blueswir1
#endif
97 d03d860b blueswir1
98 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
99 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
100 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
101 26a5f13b bellard
/* threshold to flush the translated code buffer */
102 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
103 24ab68ac Stefan Weil
static uint8_t *code_gen_ptr;
104 fd6ce8f6 bellard
105 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
106 9fa3e853 bellard
int phys_ram_fd;
107 74576198 aliguori
static int in_migration;
108 94a6b54f pbrook
109 f471a17e Alex Williamson
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
110 e2eef170 pbrook
#endif
111 9fa3e853 bellard
112 6a00d601 bellard
CPUState *first_cpu;
113 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
114 6a00d601 bellard
   cpu_exec() */
115 5fafdf24 ths
CPUState *cpu_single_env;
116 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
117 bf20dc07 ths
   1 = Precise instruction counting.
118 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
119 2e70f6ef pbrook
int use_icount = 0;
120 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
121 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
122 2e70f6ef pbrook
int64_t qemu_icount;
123 6a00d601 bellard
124 54936004 bellard
/* Per guest-page bookkeeping for the translated-block (TB) and
   self-modifying-code machinery.  One PageDesc describes one target page
   that holds translated code (see SMC_BITMAP_USE_THRESHOLD above). */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* per-page code bitmap; presumably allocated lazily once
       code_write_count crosses the threshold -- allocation site is not
       in this chunk, confirm in the TB-invalidate code. */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* PAGE_* protection/state flags; only tracked in user-mode
       emulation (see page_set_flags() usage in page_init()). */
    unsigned long flags;
#endif
} PageDesc;
135 54936004 bellard
136 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
137 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
138 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
139 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
140 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
141 41c1b1c9 Paul Brook
#else
142 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
143 41c1b1c9 Paul Brook
#endif
144 bedb69ea j_mayer
#else
145 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
146 bedb69ea j_mayer
#endif
147 54936004 bellard
148 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
149 5cd2c5b6 Richard Henderson
#define L2_BITS 10
150 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
151 54936004 bellard
152 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
153 5cd2c5b6 Richard Henderson
#define P_L1_BITS_REM \
154 5cd2c5b6 Richard Henderson
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
155 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
156 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157 5cd2c5b6 Richard Henderson
158 5cd2c5b6 Richard Henderson
/* Size of the L1 page table.  Avoid silly small sizes.  */
159 5cd2c5b6 Richard Henderson
#if P_L1_BITS_REM < 4
160 5cd2c5b6 Richard Henderson
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
161 5cd2c5b6 Richard Henderson
#else
162 5cd2c5b6 Richard Henderson
#define P_L1_BITS  P_L1_BITS_REM
163 5cd2c5b6 Richard Henderson
#endif
164 5cd2c5b6 Richard Henderson
165 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
166 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
167 5cd2c5b6 Richard Henderson
#else
168 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
169 5cd2c5b6 Richard Henderson
#endif
170 5cd2c5b6 Richard Henderson
171 5cd2c5b6 Richard Henderson
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
172 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
173 5cd2c5b6 Richard Henderson
174 5cd2c5b6 Richard Henderson
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
175 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
176 5cd2c5b6 Richard Henderson
177 83fb7adf bellard
unsigned long qemu_real_host_page_size;
178 83fb7adf bellard
unsigned long qemu_host_page_bits;
179 83fb7adf bellard
unsigned long qemu_host_page_size;
180 83fb7adf bellard
unsigned long qemu_host_page_mask;
181 54936004 bellard
182 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
183 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
184 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
185 54936004 bellard
186 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
187 41c1b1c9 Paul Brook
/* Descriptor for one guest physical page, stored in the leaves of
   l1_phys_map (see phys_page_find_alloc()). */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* offset within the registered memory region; initialized by
       phys_page_find_alloc() to the page's physical address
       ((index + i) << TARGET_PAGE_BITS). */
    ram_addr_t region_offset;
} PhysPageDesc;
192 41c1b1c9 Paul Brook
193 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
194 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
195 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
196 6d9a1304 Paul Brook
197 e2eef170 pbrook
static void io_mem_init(void);
198 e2eef170 pbrook
199 33417e70 bellard
/* io memory support */
200 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
201 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
202 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
203 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
204 6658ffb8 pbrook
static int io_mem_watch;
205 6658ffb8 pbrook
#endif
206 33417e70 bellard
207 34865134 bellard
/* log support */
208 1e8b27ca Juha Riihimรคki
#ifdef WIN32
209 1e8b27ca Juha Riihimรคki
static const char *logfilename = "qemu.log";
210 1e8b27ca Juha Riihimรคki
#else
211 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
212 1e8b27ca Juha Riihimรคki
#endif
213 34865134 bellard
FILE *logfile;
214 34865134 bellard
int loglevel;
215 e735b91c pbrook
static int log_append = 0;
216 34865134 bellard
217 e3db7226 bellard
/* statistics */
218 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
219 e3db7226 bellard
static int tlb_flush_count;
220 b3755a91 Paul Brook
#endif
221 e3db7226 bellard
static int tb_flush_count;
222 e3db7226 bellard
static int tb_phys_invalidate_count;
223 e3db7226 bellard
224 7cb69cae bellard
#ifdef _WIN32
/* Make [addr, addr + size) readable, writable and executable.
   The VirtualProtect() result is deliberately ignored. */
static void map_exec(void *addr, long size)
{
    DWORD prev_protect;

    VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &prev_protect);
}
#else
/* Make the host pages covering [addr, addr + size) RWX.  The range is
   widened outward to page boundaries; the mprotect() result is
   deliberately ignored. */
static void map_exec(void *addr, long size)
{
    unsigned long page_mask = ~((unsigned long)getpagesize() - 1);
    unsigned long first = (unsigned long)addr & page_mask;
    unsigned long last =
        (((unsigned long)addr + size) + ~page_mask) & page_mask;

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
249 7cb69cae bellard
250 b346ff46 bellard
/* One-time initialization of the host-page-size globals
   (qemu_real_host_page_size, qemu_host_page_size/bits/mask).  On BSD
   user-mode builds it additionally scans the host's existing memory
   mappings and marks them PAGE_RESERVED so guest mappings avoid them. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been pre-set elsewhere; only default
       it when still zero, and never let it drop below the target page
       size. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 700104: enumerate host mappings via libutil. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* End lies beyond the guest address space: only
                           reserve to the top when the whole guest space
                           is representable in the map. */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file.  last_brk is a
           global defined elsewhere -- NOTE(review): confirm its users. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
338 54936004 bellard
339 41c1b1c9 Paul Brook
/* Walk the multi-level l1_map for virtual page 'index' and return a
   pointer to its PageDesc.  When 'alloc' is non-zero, missing
   intermediate tables and the leaf PageDesc array are allocated
   (zero-filled) on the way down; otherwise NULL is returned as soon as
   any level is absent. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        /* Select the slot for this level from the next L2_BITS of the
           index. */
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Leaf level: an array of L2_SIZE PageDesc entries. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
388 54936004 bellard
389 41c1b1c9 Paul Brook
/* Non-allocating lookup: return the PageDesc for 'index', or NULL when
   the page has never been populated in l1_map. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    PageDesc *pd = page_find_alloc(index, 0);

    return pd;
}
393 fd6ce8f6 bellard
394 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
395 c227f099 Anthony Liguori
/* Walk the multi-level l1_phys_map for physical page 'index' and return
   its PhysPageDesc.  When 'alloc' is non-zero, missing levels are
   allocated; a freshly allocated leaf array is initialized so every
   entry is IO_MEM_UNASSIGNED with region_offset equal to the page's
   physical address.  Returns NULL when 'alloc' is zero and any level is
   absent. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;  /* NOTE(review): shadows the outer 'i'; harmless here. */

        if (!alloc) {
            return NULL;
        }

        /* qemu_malloc (not mallocz): every field is set explicitly in
           the loop below. */
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
434 92e873b9 bellard
435 c227f099 Anthony Liguori
/* Non-allocating lookup: return the PhysPageDesc for 'index', or NULL
   when no mapping has been created in l1_phys_map. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *pd = phys_page_find_alloc(index, 0);

    return pd;
}
439 92e873b9 bellard
440 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
441 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
442 3a7d929e bellard
                                    target_ulong vaddr);
443 c8a706fe pbrook
#define mmap_lock() do { } while(0)
444 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
445 9fa3e853 bellard
#endif
446 fd6ce8f6 bellard
447 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
448 4369415f bellard
449 4369415f bellard
#if defined(CONFIG_USER_ONLY)
450 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
451 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
452 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
453 4369415f bellard
#endif
454 4369415f bellard
455 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
456 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
457 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
458 4369415f bellard
#endif
459 4369415f bellard
460 8fcd3692 blueswir1
/* Allocate the translated-code buffer (statically, via mmap, or via
   qemu_malloc depending on build/host), make it and the prologue
   executable, then size the TranslationBlock array (tbs) to match.
   Exits the process when the buffer cannot be mapped. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User-mode builds use a fixed static buffer (see above). */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__) 
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Keep the buffer in the low 2GB so generated code can use
           direct (rel32) calls/branches. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Generic host: plain allocation + mprotect via map_exec(). */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Leave headroom for one maximally sized TB so translation can
       overrun the soft limit safely. */
    code_gen_buffer_max_size = code_gen_buffer_size - 
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
561 26a5f13b bellard
562 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size.
   Order matters: the TCG context is initialized before the code buffer
   is allocated, and code_gen_ptr is reset to the start of the fresh
   buffer before page/io-memory setup. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
580 26a5f13b bellard
581 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
582 9656f324 pbrook
583 e59fb374 Juan Quintela
/* VMState post-load hook for the common CPU state: scrub a legacy
   interrupt bit and invalidate the TLB, since the loaded env cannot
   carry valid cached translations.  Always returns 0 (success). */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}
594 e7f4eff7 Juan Quintela
595 e7f4eff7 Juan Quintela
/* Migration description for state common to every CPU model: the
   halted flag and the pending interrupt_request mask.  post_load
   (cpu_common_post_load) fixes up retired bits and flushes the TLB. */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
607 9656f324 pbrook
#endif
608 9656f324 pbrook
609 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
610 950f1472 Glauber Costa
{
611 950f1472 Glauber Costa
    CPUState *env = first_cpu;
612 950f1472 Glauber Costa
613 950f1472 Glauber Costa
    while (env) {
614 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
615 950f1472 Glauber Costa
            break;
616 950f1472 Glauber Costa
        env = env->next_cpu;
617 950f1472 Glauber Costa
    }
618 950f1472 Glauber Costa
619 950f1472 Glauber Costa
    return env;
620 950f1472 Glauber Costa
}
621 950f1472 Glauber Costa
622 6a00d601 bellard
/* Register 'env' on the global CPU list, assigning it the next free
   cpu_index (== number of CPUs already registered), and hook up its
   migration/savevm handlers in system mode. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* In user mode guest threads can create CPUs concurrently, so the
       list must be locked while we append. */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    /* Walk to the tail of the list, counting existing CPUs to derive
       the new CPU's index. */
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Register both the common CPU state (vmstate_cpu_common) and the
       per-target cpu_save/cpu_load handlers for migration. */
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
651 fd6ce8f6 bellard
652 d1a1eb74 Tristan Gingold
/* Allocate a new translation block. Flush the translation buffer if
653 d1a1eb74 Tristan Gingold
   too many translation blocks or too much generated code. */
654 d1a1eb74 Tristan Gingold
static TranslationBlock *tb_alloc(target_ulong pc)
655 d1a1eb74 Tristan Gingold
{
656 d1a1eb74 Tristan Gingold
    TranslationBlock *tb;
657 d1a1eb74 Tristan Gingold
658 d1a1eb74 Tristan Gingold
    if (nb_tbs >= code_gen_max_blocks ||
659 d1a1eb74 Tristan Gingold
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
660 d1a1eb74 Tristan Gingold
        return NULL;
661 d1a1eb74 Tristan Gingold
    tb = &tbs[nb_tbs++];
662 d1a1eb74 Tristan Gingold
    tb->pc = pc;
663 d1a1eb74 Tristan Gingold
    tb->cflags = 0;
664 d1a1eb74 Tristan Gingold
    return tb;
665 d1a1eb74 Tristan Gingold
}
666 d1a1eb74 Tristan Gingold
667 d1a1eb74 Tristan Gingold
/* Release a TB.  In practice this is mostly used for single use
   temporary TBs: we only reclaim the space when 'tb' happens to be
   the most recently generated one; all other cases are ignored. */
void tb_free(TranslationBlock *tb)
{
    if (!(nb_tbs > 0 && tb == &tbs[nb_tbs - 1])) {
        /* Not the last TB generated: leave it in place. */
        return;
    }
    nb_tbs--;
    code_gen_ptr = tb->tc_ptr;
}
677 d1a1eb74 Tristan Gingold
678 9fa3e853 bellard
/* Drop the SMC code bitmap of a page (if one was built) and reset its
   write counter, so the bitmap can be rebuilt lazily by
   build_page_bitmap() once enough code writes accumulate again. */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
686 9fa3e853 bellard
687 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
688 5cd2c5b6 Richard Henderson
689 5cd2c5b6 Richard Henderson
static void page_flush_tb_1 (int level, void **lp)
690 fd6ce8f6 bellard
{
691 5cd2c5b6 Richard Henderson
    int i;
692 fd6ce8f6 bellard
693 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
694 5cd2c5b6 Richard Henderson
        return;
695 5cd2c5b6 Richard Henderson
    }
696 5cd2c5b6 Richard Henderson
    if (level == 0) {
697 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
698 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
699 5cd2c5b6 Richard Henderson
            pd[i].first_tb = NULL;
700 5cd2c5b6 Richard Henderson
            invalidate_page_bitmap(pd + i);
701 fd6ce8f6 bellard
        }
702 5cd2c5b6 Richard Henderson
    } else {
703 5cd2c5b6 Richard Henderson
        void **pp = *lp;
704 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
705 5cd2c5b6 Richard Henderson
            page_flush_tb_1 (level - 1, pp + i);
706 5cd2c5b6 Richard Henderson
        }
707 5cd2c5b6 Richard Henderson
    }
708 5cd2c5b6 Richard Henderson
}
709 5cd2c5b6 Richard Henderson
710 5cd2c5b6 Richard Henderson
static void page_flush_tb(void)
711 5cd2c5b6 Richard Henderson
{
712 5cd2c5b6 Richard Henderson
    int i;
713 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
714 5cd2c5b6 Richard Henderson
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
715 fd6ce8f6 bellard
    }
716 fd6ce8f6 bellard
}
717 fd6ce8f6 bellard
718 fd6ce8f6 bellard
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* Sanity check: generated code must never have run past the buffer. */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* Clear every CPU's TB lookup cache. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* Clear the physical hash table and all per-page TB lists. */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* Reset the code buffer so translation restarts from the beginning. */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
746 fd6ce8f6 bellard
747 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
748 fd6ce8f6 bellard
749 bc98a7ef j_mayer
/* Debug check: report any TB that still intersects the page containing
   'address' after an invalidation. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* Overlap iff the page ends after the TB starts and the
               page starts before the TB ends. */
            if (address + TARGET_PAGE_SIZE > tb->pc &&
                address < tb->pc + tb->size) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
765 fd6ce8f6 bellard
766 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
767 fd6ce8f6 bellard
static void tb_page_check(void)
768 fd6ce8f6 bellard
{
769 fd6ce8f6 bellard
    TranslationBlock *tb;
770 fd6ce8f6 bellard
    int i, flags1, flags2;
771 3b46e624 ths
772 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
773 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
774 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
775 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
776 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
777 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
778 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
779 fd6ce8f6 bellard
            }
780 fd6ce8f6 bellard
        }
781 fd6ce8f6 bellard
    }
782 fd6ce8f6 bellard
}
783 fd6ce8f6 bellard
784 fd6ce8f6 bellard
#endif
785 fd6ce8f6 bellard
786 fd6ce8f6 bellard
/* invalidate one TB */
/* Unlink 'tb' from a singly linked TB list whose "next" pointer lives
   at byte offset 'next_offset' inside each TranslationBlock.  'ptb'
   points at the list head.  The TB is assumed to be on the list. */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            /* Splice out: copy tb's own next pointer over the link. */
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
800 fd6ce8f6 bellard
801 9fa3e853 bellard
/* Unlink 'tb' from a per-page TB list.  List pointers are tagged: the
   low two bits hold the page slot index (0 or 1) selecting which
   page_next[] link to follow, so each node is untagged before use. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;                              /* slot tag */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);      /* strip tag */
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
817 9fa3e853 bellard
818 d4e8164f bellard
/* Remove jump slot 'n' of 'tb' from the circular list of incoming
   jumps it participates in.  List pointers carry a tag in their low
   two bits: 0/1 select jmp_next[] of the pointed-to TB, and tag 2
   marks the owner's jmp_first entry (the list head). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* Reached the list head: continue via jmp_first. */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
845 d4e8164f bellard
846 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* Retarget the jump at the TB's own code just past the jump
       instruction (tb_next_offset[n]), i.e. fall back to the exit
       path instead of chaining into another TB. */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
852 d4e8164f bellard
853 41c1b1c9 Paul Brook
/* Remove 'tb' from every data structure that references it: the
   physical-PC hash table, the per-page TB lists (skipping the page
   equal to 'page_addr', whose list the caller manages; pass -1 to
   unlink from both pages), every CPU's jump cache, and the jump
   chains linking TBs together. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB.  Walk the circular
       incoming-jump list; low-bit tag 2 marks the end of the list. */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
908 9fa3e853 bellard
909 9fa3e853 bellard
/* Set 'len' consecutive bits of the bitmap 'tab' starting at bit
   index 'start'.  Bit i lives in tab[i >> 3], least significant bit
   first.  A len of zero is a no-op. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    int end = start + len;

    for (bit = start; bit < end; bit++) {
        tab[bit >> 3] |= (uint8_t)(1u << (bit & 7));
    }
}
935 9fa3e853 bellard
936 9fa3e853 bellard
/* Build the SMC code bitmap of page 'p': one bit per guest byte of
   the page, set where translated code was read from.  The per-page TB
   list pointers carry the page slot index in their low two bits
   (n == 0: TB starts on this page; n == 1: TB spilled over from the
   previous page). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* TB started on the previous page: it covers this page
               from offset 0 up to where it ends. */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
963 9fa3e853 bellard
964 2e70f6ef pbrook
/* Translate the guest code at pc (with the given cs_base/flags/cflags)
   into a fresh TB, link it into the page tables and hash table, and
   return it.  If TB space is exhausted, everything is flushed and the
   allocation retried — which cannot fail on an empty buffer. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* Advance past the generated code, rounded up to CODE_GEN_ALIGN. */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed: a TB may span two guest pages. */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
1001 3b46e624 ths
1002 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* Lazily-resolved TB of the faulting write, used to detect
       self-modifying code inside the currently executing TB. */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;        /* low bits: page slot tag */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1112 fd6ce8f6 bellard
1113 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len.
   Fast path for code-write invalidation: when the page has an SMC
   bitmap, only fall back to the full invalidation if one of the
   written bytes actually holds translated code. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* Extract the 'len' bitmap bits covering the written bytes. */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
        /* No bitmap yet: be conservative and always invalidate. */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1139 9fa3e853 bellard
1140 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1141 41c1b1c9 Paul Brook
/* Invalidate every TB on the page containing 'addr' (user-mode page
   fault path).  'pc'/'puc' describe the faulting write so that, with
   precise SMC support, a write inside the currently executing TB can
   restore CPU state and restart execution safely. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* Resolve which TB the faulting host pc belongs to. */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;        /* low bits: page slot tag */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
1199 9fa3e853 bellard
#endif
1200 fd6ce8f6 bellard
1201 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    /* Record which guest page half 'n' (0 or 1) of the TB lives in and
       push the TB onto that page's TB list.  The low bits of the stored
       pointer encode 'n' so the list walker knows which page_next[]
       link to follow.  */
    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;   /* non-NULL => page already had code */
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* A host page may span several target pages; gather the union
           of their flags and strip PAGE_WRITE from each.  */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1254 fd6ce8f6 bellard
1255 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table (prepend to the chain for this
       hash bucket) */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list; a TB can straddle at most two pages */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* jmp_first tagged with 2 marks the head of the incoming-jump
       list (see tb_reset_jump_recursive2); no outgoing links yet */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff means "no jump slot") */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1294 fd6ce8f6 bellard
1295 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1296 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1297 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1298 fd6ce8f6 bellard
{
1299 9fa3e853 bellard
    int m_min, m_max, m;
1300 9fa3e853 bellard
    unsigned long v;
1301 9fa3e853 bellard
    TranslationBlock *tb;
1302 a513fe19 bellard
1303 a513fe19 bellard
    if (nb_tbs <= 0)
1304 a513fe19 bellard
        return NULL;
1305 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1306 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1307 a513fe19 bellard
        return NULL;
1308 a513fe19 bellard
    /* binary search (cf Knuth) */
1309 a513fe19 bellard
    m_min = 0;
1310 a513fe19 bellard
    m_max = nb_tbs - 1;
1311 a513fe19 bellard
    while (m_min <= m_max) {
1312 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1313 a513fe19 bellard
        tb = &tbs[m];
1314 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1315 a513fe19 bellard
        if (v == tc_ptr)
1316 a513fe19 bellard
            return tb;
1317 a513fe19 bellard
        else if (tc_ptr < v) {
1318 a513fe19 bellard
            m_max = m - 1;
1319 a513fe19 bellard
        } else {
1320 a513fe19 bellard
            m_min = m + 1;
1321 a513fe19 bellard
        }
1322 5fafdf24 ths
    }
1323 a513fe19 bellard
    return &tbs[m_max];
1324 a513fe19 bellard
}
1325 7501267e bellard
1326 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1327 ea041c0e bellard
1328 ea041c0e bellard
/* Unlink the n-th outgoing chained jump of 'tb': find the TB it jumps
   to, remove 'tb' from that TB's incoming-jump list, patch the jump in
   the generated code back to its default target, and recurse on the
   destination TB.  List pointers carry a 2-bit tag in their low bits:
   0/1 = which jmp_next[] slot of the pointed-to TB continues the list,
   2 = list head (the destination TB itself).  */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;          /* tag 2: reached the destination TB */
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;          /* found our own entry in the chain */
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1366 ea041c0e bellard
1367 ea041c0e bellard
/* Unchain both possible outgoing jumps of 'tb' (and, transitively,
   of every TB reachable through them).  */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    int n;

    for (n = 0; n < 2; n++) {
        tb_reset_jump_recursive2(tb, n);
    }
}
1372 ea041c0e bellard
1373 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1374 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1375 94df27fd Paul Brook
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1376 94df27fd Paul Brook
{
1377 94df27fd Paul Brook
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1378 94df27fd Paul Brook
}
1379 94df27fd Paul Brook
#else
1380 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1381 d720b93d bellard
{
1382 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1383 9b3c35e0 j_mayer
    target_ulong pd;
1384 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1385 c2f07f81 pbrook
    PhysPageDesc *p;
1386 d720b93d bellard
1387 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1388 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1389 c2f07f81 pbrook
    if (!p) {
1390 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1391 c2f07f81 pbrook
    } else {
1392 c2f07f81 pbrook
        pd = p->phys_offset;
1393 c2f07f81 pbrook
    }
1394 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1395 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1396 d720b93d bellard
}
1397 c27004ec bellard
#endif
1398 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1399 d720b93d bellard
1400 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
/* Watchpoints are not supported in user-mode emulation: removal is a
   no-op ... */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}

/* ... and insertion always fails with -ENOSYS.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1411 c527ee8f Paul Brook
#else
1412 6658ffb8 pbrook
/* Add a watchpoint.  'len' must be a power of two (1/2/4/8) and 'addr'
   must be aligned to it.  On success the new watchpoint is optionally
   returned through '*watchpoint'; returns 0, or -EINVAL on bad args.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    /* qemu_malloc aborts on failure, so no NULL check is needed here */
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* drop the TLB entry so the next access re-checks watchpoints */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1443 6658ffb8 pbrook
1444 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1445 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1446 a1d1bb31 aliguori
                          int flags)
1447 6658ffb8 pbrook
{
1448 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1449 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1450 6658ffb8 pbrook
1451 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1452 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1453 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1454 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1455 6658ffb8 pbrook
            return 0;
1456 6658ffb8 pbrook
        }
1457 6658ffb8 pbrook
    }
1458 a1d1bb31 aliguori
    return -ENOENT;
1459 6658ffb8 pbrook
}
1460 6658ffb8 pbrook
1461 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  Unlinks it, flushes the
   TLB entry covering its address, and frees it; 'watchpoint' must not
   be used afterwards.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
1470 a1d1bb31 aliguori
1471 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1472 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1473 a1d1bb31 aliguori
{
1474 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1475 a1d1bb31 aliguori
1476 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1477 a1d1bb31 aliguori
        if (wp->flags & mask)
1478 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1479 c0ce998e aliguori
    }
1480 7d03f82f edgar_igl
}
1481 c527ee8f Paul Brook
#endif
1482 7d03f82f edgar_igl
1483 a1d1bb31 aliguori
/* Add a breakpoint at 'pc'.  On success the new breakpoint is
   optionally returned through '*breakpoint' and 0 is returned;
   targets without in-circuit-emulation support return -ENOSYS.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    /* qemu_malloc aborts on failure, so no NULL check is needed here */
    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* retranslate the affected code so it checks the breakpoint */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
1510 4c3a88a2 bellard
1511 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1512 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1513 a1d1bb31 aliguori
{
1514 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1515 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1516 a1d1bb31 aliguori
1517 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1518 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1519 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1520 a1d1bb31 aliguori
            return 0;
1521 a1d1bb31 aliguori
        }
1522 7d03f82f edgar_igl
    }
1523 a1d1bb31 aliguori
    return -ENOENT;
1524 a1d1bb31 aliguori
#else
1525 a1d1bb31 aliguori
    return -ENOSYS;
1526 7d03f82f edgar_igl
#endif
1527 7d03f82f edgar_igl
}
1528 7d03f82f edgar_igl
1529 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  Unlinks it, forces
   retranslation of the code at its pc, and frees it; 'breakpoint'
   must not be used afterwards.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1540 a1d1bb31 aliguori
1541 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1542 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1543 a1d1bb31 aliguori
{
1544 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1545 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1546 a1d1bb31 aliguori
1547 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1548 a1d1bb31 aliguori
        if (bp->flags & mask)
1549 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1550 c0ce998e aliguori
    }
1551 4c3a88a2 bellard
#endif
1552 4c3a88a2 bellard
}
1553 4c3a88a2 bellard
1554 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            /* under KVM, single-stepping is delegated to the kernel */
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1571 c33a346e bellard
1572 34865134 bellard
/* enable or disable low levels log */
/* Sets the global log mask.  Opens the log file lazily on first
   enable (append mode if a previous session already wrote to it)
   and closes it when all log flags are cleared.  */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            /* _exit, not exit: avoid atexit handlers in this state */
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* any reopen from now on must append rather than truncate */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1599 34865134 bellard
1600 34865134 bellard
/* Switch logging to a new file name.  Closes any open log file and
   reopens (via cpu_set_log) under the current log mask.
   NOTE(review): the previous strdup'd logfilename is not freed here —
   presumably tolerated because this is called rarely; confirm before
   changing, as the initial value may not be heap-allocated.  */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
1609 c33a346e bellard
1610 3098dba0 aurel32
/* Force the CPU out of chained-TB execution: detach the TB it is
   currently running and break all jump chains reaching it, so control
   returns to the main loop at the next TB boundary.  */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    /* serialize concurrent unlink attempts on the jump lists */
    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1629 3098dba0 aurel32
1630 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in 'mask' on 'env' and make sure the CPU
   notices them: either by kicking it from the iothread, by forcing an
   icount fetch, or by unchaining its current TB.  */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* force icount_decr to go negative so the CPU loop exits */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* raising a *new* interrupt outside an I/O instruction would
           make deterministic icount replay impossible */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1661 ea041c0e bellard
1662 b54ad049 bellard
/* Clear the interrupt bits in 'mask'; no kick or TB unchaining is
   needed for lowering a request.  */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1666 b54ad049 bellard
1667 3098dba0 aurel32
/* Request that the CPU leave its execution loop as soon as possible:
   set the exit flag and break the TB jump chains it may be running.  */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1672 3098dba0 aurel32
1673 c7cd6a37 blueswir1
/* Table of log categories selectable with the '-d' option: mask bit,
   user-visible name, help text.  Parsed by cpu_str_to_log_mask();
   terminated by an all-zero entry.  */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1704 f193c797 bellard
1705 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1706 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1707 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1708 f6f3fbca Michael S. Tsirkin
1709 f6f3fbca Michael S. Tsirkin
/* Notify every registered physical-memory client of a new mapping
   of 'size' bytes at 'start_addr' with the given phys_offset.  */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}
1718 f6f3fbca Michael S. Tsirkin
1719 f6f3fbca Michael S. Tsirkin
/* Ask every registered client to sync its dirty bitmap for the range
   [start, end).  Stops at the first client error and returns its
   (negative) code; returns 0 when all clients succeed.  */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}
1730 f6f3fbca Michael S. Tsirkin
1731 f6f3fbca Michael S. Tsirkin
/* Toggle migration logging on every registered client.  Stops at the
   first client error and returns its (negative) code; 0 on success.  */
static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
1741 f6f3fbca Michael S. Tsirkin
1742 5cd2c5b6 Richard Henderson
/* Recursively walk one subtree of the multi-level physical page map
   and replay every assigned page to 'client' via set_memory.
   'level' 0 means *lp points at a leaf array of PhysPageDesc; higher
   levels are arrays of child pointers.  NULL subtrees are skipped.  */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1765 f6f3fbca Michael S. Tsirkin
1766 f6f3fbca Michael S. Tsirkin
/* Replay the entire physical page map to 'client' by walking every
   top-level (L1) slot of the multi-level map.  */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        /* Fix: walk slot i, not slot 1 — the previous code passed
           'l1_phys_map + 1' on every iteration, leaving 'i' unused and
           replaying only one subtree P_L1_SIZE times.  */
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}
1774 f6f3fbca Michael S. Tsirkin
1775 f6f3fbca Michael S. Tsirkin
/* Register a new physical-memory client and immediately replay all
   existing mappings to it so it starts with a complete view.  */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1780 f6f3fbca Michael S. Tsirkin
1781 f6f3fbca Michael S. Tsirkin
/* Unregister a physical-memory client; the caller retains ownership
   of the client structure.  */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1785 f6f3fbca Michael S. Tsirkin
#endif
1786 f6f3fbca Michael S. Tsirkin
1787 f193c797 bellard
/* Return non-zero iff the first n bytes of s1 equal the whole of s2.
   Helper for cpu_str_to_log_mask(): s1 points into a comma-separated
   list and need not be NUL-terminated at n.  */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* Reject negative lengths explicitly instead of relying on the
       implicit signed->unsigned conversion against strlen's size_t
       (silences -Wsign-compare; observable behavior is unchanged for
       the non-negative lengths callers pass).  */
    if (n < 0 || strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1793 3b46e624 ths
1794 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
1795 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1796 f193c797 bellard
{
1797 c7cd6a37 blueswir1
    const CPULogItem *item;
1798 f193c797 bellard
    int mask;
1799 f193c797 bellard
    const char *p, *p1;
1800 f193c797 bellard
1801 f193c797 bellard
    p = str;
1802 f193c797 bellard
    mask = 0;
1803 f193c797 bellard
    for(;;) {
1804 f193c797 bellard
        p1 = strchr(p, ',');
1805 f193c797 bellard
        if (!p1)
1806 f193c797 bellard
            p1 = p + strlen(p);
1807 9742bf26 Yoshiaki Tamura
        if(cmp1(p,p1-p,"all")) {
1808 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1809 9742bf26 Yoshiaki Tamura
                mask |= item->mask;
1810 9742bf26 Yoshiaki Tamura
            }
1811 9742bf26 Yoshiaki Tamura
        } else {
1812 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1813 9742bf26 Yoshiaki Tamura
                if (cmp1(p, p1 - p, item->name))
1814 9742bf26 Yoshiaki Tamura
                    goto found;
1815 9742bf26 Yoshiaki Tamura
            }
1816 9742bf26 Yoshiaki Tamura
            return 0;
1817 f193c797 bellard
        }
1818 f193c797 bellard
    found:
1819 f193c797 bellard
        mask |= item->mask;
1820 f193c797 bellard
        if (*p1 != ',')
1821 f193c797 bellard
            break;
1822 f193c797 bellard
        p = p1 + 1;
1823 f193c797 bellard
    }
1824 f193c797 bellard
    return mask;
1825 f193c797 bellard
}
1826 ea041c0e bellard
1827 7501267e bellard
/* Report a fatal emulation error and abort.  The printf-style message
   and a CPU state dump go to stderr and, when enabled, to the qemu
   log; the log is flushed and closed before aborting so nothing is
   lost.  Never returns.  */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* the va_list is consumed twice (stderr + log), so copy it */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT handler so abort() really
           terminates even if the guest installed its own handler */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1866 7501267e bellard
1867 c5be9f08 ths
/* Create a new CPUState that is a copy of 'env' (used e.g. for fork-like
   cloning).  The new CPU keeps its own next_cpu link and cpu_index, and
   gets private copies of all break- and watchpoints.  Returns the new
   CPUState. */
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       The memcpy above copied env's list heads into new_env, so new_env's
       lists alias env's nodes; reinitialize *new_env's* heads (not env's,
       which would destroy the source CPU's breakpoints and leave nothing
       to iterate) and insert fresh copies from env's intact lists.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1900 c5be9f08 ths
1901 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1902 0124311e bellard
1903 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page: clear the hash bucket for the page
       itself and for the preceding page. */
    target_ulong pages[2] = { addr - TARGET_PAGE_SIZE, addr };
    int k;

    for (k = 0; k < 2; k++) {
        unsigned int idx = tb_jmp_cache_hash_page(pages[k]);
        memset(&env->tb_jmp_cache[idx], 0,
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    }
}
1917 5c751e99 edgar_igl
1918 08738984 Igor Kovalenko
/* Template for an invalidated TLB entry: all fields set to -1.  Assigned
   wholesale to a CPUTLBEntry to invalidate it (see tlb_flush and
   tlb_flush_entry below). */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1924 08738984 Igor Kovalenko
1925 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1926 ee8b7021 bellard
   implemented yet) */
1927 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1928 33417e70 bellard
{
1929 33417e70 bellard
    int i;
1930 0124311e bellard
1931 9fa3e853 bellard
#if defined(DEBUG_TLB)
1932 9fa3e853 bellard
    printf("tlb_flush:\n");
1933 9fa3e853 bellard
#endif
1934 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1935 0124311e bellard
       links while we are modifying them */
1936 0124311e bellard
    env->current_tb = NULL;
1937 0124311e bellard
1938 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1939 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1940 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1941 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1942 cfde4bd9 Isaku Yamahata
        }
1943 33417e70 bellard
    }
1944 9fa3e853 bellard
1945 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1946 9fa3e853 bellard
1947 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1948 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1949 e3db7226 bellard
    tlb_flush_count++;
1950 33417e70 bellard
}
1951 33417e70 bellard
1952 274da6b2 bellard
/* Invalidate one TLB entry if any of its three access addresses
   (read/write/code) matches the page 'addr'. */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    const target_ulong cmp_mask = TARGET_PAGE_MASK | TLB_INVALID_MASK;

    if ((tlb_entry->addr_read & cmp_mask) == addr ||
        (tlb_entry->addr_write & cmp_mask) == addr ||
        (tlb_entry->addr_code & cmp_mask) == addr) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1963 61382a50 bellard
1964 2e12669a bellard
/* Flush the TLB entries for one virtual page in all MMU modes, plus the
   jump cache buckets that could reference it. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int slot;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    slot = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][slot], addr);
    }

    tlb_flush_jmp_cache(env, addr);
}
1993 9fa3e853 bellard
1994 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG makes subsequent writes to this page go
       through the slow path, where self-modifying code is caught. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2002 9fa3e853 bellard
2003 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* Setting CODE_DIRTY_FLAG re-enables the fast write path; env and
       vaddr are unused here, kept for the caller's convenience. */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2010 1ccde1cb bellard
2011 5fafdf24 ths
/* If this TLB entry maps plain RAM inside [start, start+length) (host
   addresses), mark it TLB_NOTDIRTY so writes trap and set dirty bits. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long host_addr;

    /* Only plain RAM mappings participate in dirty tracking. */
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    host_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
    if (host_addr - start < length) {
        tlb_entry->addr_write =
            (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
    }
}
2022 1ccde1cb bellard
2023 5579c7f3 pbrook
/* Clear the given dirty flags for the ram range [start, end) and patch
   every CPU's TLB so that the next write to the range traps and sets the
   dirty bits again.
   Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    /* Walk every TLB entry of every CPU in every MMU mode, comparing
       against host addresses (start1 is the host pointer for 'start'). */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2058 1ccde1cb bellard
2059 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2060 74576198 aliguori
{
2061 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2062 74576198 aliguori
    in_migration = enable;
2063 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2064 f6f3fbca Michael S. Tsirkin
    return ret;
2065 74576198 aliguori
}
2066 74576198 aliguori
2067 74576198 aliguori
/* Report whether dirty-memory tracking is currently enabled (the value
   last set via cpu_physical_memory_set_dirty_tracking). */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2071 74576198 aliguori
2072 c227f099 Anthony Liguori
/* Ask the registered memory clients to synchronize their dirty bitmap
   for [start_addr, end_addr].  Returns the clients' result. */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    return cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
}
2080 2bec46dc aliguori
2081 e5896b12 Anthony PERARD
int cpu_physical_log_start(target_phys_addr_t start_addr,
2082 e5896b12 Anthony PERARD
                           ram_addr_t size)
2083 e5896b12 Anthony PERARD
{
2084 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2085 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2086 e5896b12 Anthony PERARD
        if (client->log_start) {
2087 e5896b12 Anthony PERARD
            int r = client->log_start(client, start_addr, size);
2088 e5896b12 Anthony PERARD
            if (r < 0) {
2089 e5896b12 Anthony PERARD
                return r;
2090 e5896b12 Anthony PERARD
            }
2091 e5896b12 Anthony PERARD
        }
2092 e5896b12 Anthony PERARD
    }
2093 e5896b12 Anthony PERARD
    return 0;
2094 e5896b12 Anthony PERARD
}
2095 e5896b12 Anthony PERARD
2096 e5896b12 Anthony PERARD
int cpu_physical_log_stop(target_phys_addr_t start_addr,
2097 e5896b12 Anthony PERARD
                          ram_addr_t size)
2098 e5896b12 Anthony PERARD
{
2099 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2100 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2101 e5896b12 Anthony PERARD
        if (client->log_stop) {
2102 e5896b12 Anthony PERARD
            int r = client->log_stop(client, start_addr, size);
2103 e5896b12 Anthony PERARD
            if (r < 0) {
2104 e5896b12 Anthony PERARD
                return r;
2105 e5896b12 Anthony PERARD
            }
2106 e5896b12 Anthony PERARD
        }
2107 e5896b12 Anthony PERARD
    }
2108 e5896b12 Anthony PERARD
    return 0;
2109 e5896b12 Anthony PERARD
}
2110 e5896b12 Anthony PERARD
2111 3a7d929e bellard
/* Re-derive the TLB_NOTDIRTY bit of a RAM write entry from the current
   dirty state of its backing ram page. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    void *host_ptr;
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;   /* only plain RAM entries carry dirty tracking */
    }
    host_ptr = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
        + tlb_entry->addend);
    ram_addr = qemu_ram_addr_from_host_nofail(host_ptr);
    if (!cpu_physical_memory_is_dirty(ram_addr)) {
        /* Clean page: make writes trap so the dirty bit gets set. */
        tlb_entry->addr_write |= TLB_NOTDIRTY;
    }
}
2125 3a7d929e bellard
2126 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2127 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2128 3a7d929e bellard
{
2129 3a7d929e bellard
    int i;
2130 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2131 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2132 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2133 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2134 cfde4bd9 Isaku Yamahata
    }
2135 3a7d929e bellard
}
2136 3a7d929e bellard
2137 0f459d16 pbrook
/* Drop the TLB_NOTDIRTY bit from one entry if it maps exactly the page
   'vaddr' (i.e. the entry equals vaddr | TLB_NOTDIRTY). */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
2142 1ccde1cb bellard
2143 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2144 0f459d16 pbrook
   so that it is no longer dirty */
2145 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2146 1ccde1cb bellard
{
2147 1ccde1cb bellard
    int i;
2148 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2149 1ccde1cb bellard
2150 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2151 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2152 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2153 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2154 9fa3e853 bellard
}
2155 9fa3e853 bellard
2156 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
2157 d4c430a8 Paul Brook
   large pages and trigger a full TLB flush if these are invalidated.  */
2158 d4c430a8 Paul Brook
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2159 d4c430a8 Paul Brook
                               target_ulong size)
2160 d4c430a8 Paul Brook
{
2161 d4c430a8 Paul Brook
    target_ulong mask = ~(size - 1);
2162 d4c430a8 Paul Brook
2163 d4c430a8 Paul Brook
    if (env->tlb_flush_addr == (target_ulong)-1) {
2164 d4c430a8 Paul Brook
        env->tlb_flush_addr = vaddr & mask;
2165 d4c430a8 Paul Brook
        env->tlb_flush_mask = mask;
2166 d4c430a8 Paul Brook
        return;
2167 d4c430a8 Paul Brook
    }
2168 d4c430a8 Paul Brook
    /* Extend the existing region to include the new page.
2169 d4c430a8 Paul Brook
       This is a compromise between unnecessary flushes and the cost
2170 d4c430a8 Paul Brook
       of maintaining a full variable size TLB.  */
2171 d4c430a8 Paul Brook
    mask &= env->tlb_flush_mask;
2172 d4c430a8 Paul Brook
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2173 d4c430a8 Paul Brook
        mask <<= 1;
2174 d4c430a8 Paul Brook
    }
2175 d4c430a8 Paul Brook
    env->tlb_flush_addr &= mask;
2176 d4c430a8 Paul Brook
    env->tlb_flush_mask = mask;
2177 d4c430a8 Paul Brook
}
2178 d4c430a8 Paul Brook
2179 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;           /* phys offset + flags for the page */
    unsigned int index;
    target_ulong address;       /* value stored in addr_read/addr_write */
    target_ulong code_address;  /* value stored in addr_code */
    unsigned long addend;       /* host - guest address delta for RAM */
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;   /* what the slow I/O path will see */

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Remember the large-page region so it can be flushed later. */
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    /* Fill in the TLB slot; the iotlb table stores iotlb - vaddr so the
       slow path can recover the I/O address by adding vaddr back. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap writes so dirty bits get updated. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2284 9fa3e853 bellard
2285 0124311e bellard
#else
2286 0124311e bellard
2287 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
{
    /* No softmmu TLB to maintain in this configuration: no-op. */
}
2290 0124311e bellard
2291 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    /* No softmmu TLB to maintain in this configuration: no-op. */
}
2294 0124311e bellard
2295 edf8e2af Mika Westerberg
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator state for the region walk: consecutive pages with the same
   protection are merged into one region before 'fn' is invoked. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked per merged region */
    void *priv;                 /* opaque argument passed through to fn */
    unsigned long start;        /* start of the open region, or -1ul if none */
    int prot;                   /* protection flags of the open region */
};
2307 5cd2c5b6 Richard Henderson
2308 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2309 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2310 5cd2c5b6 Richard Henderson
{
2311 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2312 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2313 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2314 5cd2c5b6 Richard Henderson
            return rc;
2315 5cd2c5b6 Richard Henderson
        }
2316 5cd2c5b6 Richard Henderson
    }
2317 5cd2c5b6 Richard Henderson
2318 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2319 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2320 5cd2c5b6 Richard Henderson
2321 5cd2c5b6 Richard Henderson
    return 0;
2322 5cd2c5b6 Richard Henderson
}
2323 5cd2c5b6 Richard Henderson
2324 5cd2c5b6 Richard Henderson
/* Recursive helper for walk_memory_regions: walk one node of the page
   table radix tree.  'base' is the guest address covered by *lp, 'level'
   its depth (0 = leaf PageDesc array).  Emits a region boundary whenever
   the protection flags change.  Returns a nonzero callback result to
   abort the walk, else 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Empty subtree: everything below 'base' is unmapped (prot 0). */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        /* Leaf: scan the PageDesc array for protection changes. */
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        /* Interior node: recurse into each child subtree. */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2361 5cd2c5b6 Richard Henderson
2362 5cd2c5b6 Richard Henderson
/* Walk all mapped guest memory, invoking 'fn(priv, start, end, prot)'
   once per maximal run of pages with identical protection.  Stops early
   and returns fn's nonzero result; returns 0 on completion. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data = {
        .fn = fn,
        .priv = priv,
        .start = -1ul,   /* no region open yet */
        .prot = 0,
    };
    unsigned long idx;

    /* Visit every top-level slot of the page table radix tree. */
    for (idx = 0; idx < V_L1_SIZE; idx++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)idx << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + idx);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush any region still open at the end of the address space. */
    return walk_memory_regions_end(&data, 0, 0);
}
2382 edf8e2af Mika Westerberg
2383 b480d9b7 Paul Brook
/* walk_memory_regions callback for page_dump: print one region as
   "start-end size rwx" to the FILE passed via priv.  Always returns 0
   so the walk continues. */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;
    char r = (prot & PAGE_READ) ? 'r' : '-';
    char w = (prot & PAGE_WRITE) ? 'w' : '-';
    char x = (prot & PAGE_EXEC) ? 'x' : '-';

    fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start, r, w, x);

    return 0;
}
2397 edf8e2af Mika Westerberg
2398 edf8e2af Mika Westerberg
/* dump memory mappings */
void page_dump(FILE *f)
{
    /* Header row, then one dump_region line per merged region. */
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
2405 33417e70 bellard
2406 53a5960a pbrook
/* Return the PAGE_* flags of the page containing 'address', or 0 if the
   page has no descriptor (i.e. is unmapped). */
int page_get_flags(target_ulong address)
{
    PageDesc *desc = page_find(address >> TARGET_PAGE_BITS);

    return desc ? desc->flags : 0;
}
2415 9fa3e853 bellard
2416 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    /* Operate on whole target pages. */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    /* Record that the page is writable by intent, so it can later be
       write-protected (when it holds translated code) and restored. */
    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            /* The page becomes writable while still holding translation
               blocks: drop them so stale code cannot be executed. */
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2453 33417e70 bellard
2454 3d97b40b ths
/* Check that every page of [start, start + len) is mapped, PAGE_VALID,
   and grants all protections requested in FLAGS.  Returns 0 on success,
   -1 on any failure (including address wrap-around).
   When PAGE_WRITE is requested, pages that were made read-only because
   they contain translated code are unprotected on the fly. */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* BUG FIX: the old code returned 0 here, so only the first
               page of a multi-page range was ever checked/unprotected.
               Keep iterating so every page of the range is validated. */
        }
    }
    return 0;
}
2503 3d97b40b ths
2504 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* mprotect() works at host-page granularity, which may cover
           several target pages; restore the whole host page. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            /* Accumulate the union of flags so mprotect() grants every
               target page's permissions within the host page. */
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;   /* fault handled: caller may retry the access */
    }
    mmap_unlock();
    return 0;       /* genuine protection fault, not ours to fix */
}
2551 9fa3e853 bellard
2552 6a00d601 bellard
/* No-op in user-mode emulation: there is no softmmu TLB to update. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2556 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2557 9fa3e853 bellard
2558 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2559 8da3ff18 pbrook
2560 c04b2b78 Paul Brook
/* Byte offset of ADDR within its target page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* A physical page subdivided into multiple I/O regions.  Both tables
   have one entry per byte of the page (presumably indexed via
   SUBPAGE_IDX — confirm against subpage_register below): the io-mem
   index to dispatch to, and the region offset passed to its handler. */
typedef struct subpage_t {
    target_phys_addr_t base;                     /* page-aligned physical base */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;
2566 c04b2b78 Paul Brook
2567 c227f099 Anthony Liguori
/* Forward declarations for the subpage machinery defined later in
   this file. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
/* Compute the sub-range [start_addr2, end_addr2] (inclusive, page-relative)
   that the registration [start_addr, start_addr + orig_size) covers inside
   the page containing ADDR, and set need_subpage = 1 when the page is only
   partially covered.  NOTE: reads `orig_size` from the caller's scope, and
   the `end_addr` parameter is unused; arguments are multiply evaluated. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2591 db7b5426 blueswir1
2592 8f2498f9 Michael S. Tsirkin
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;   /* CHECK_SUBPAGE reads this by name */
    subpage_t *subpage;

    /* Let memory clients (e.g. KVM) know about the new mapping first. */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* The page already has a mapping; if we only cover part of
               it, we must split it into a subpage. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    /* Convert the existing full-page mapping into a
                       subpage that initially mirrors it. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage: reuse its state. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM pages advance the backing offset page by page;
                   MMIO pages all share the same phys_offset. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Previously unassigned page: allocate its descriptor. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    /* Partial coverage of a fresh page: the uncovered
                       part stays IO_MEM_UNASSIGNED inside the subpage. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2680 33417e70 bellard
2681 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2682 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2683 ba863458 bellard
{
2684 ba863458 bellard
    PhysPageDesc *p;
2685 ba863458 bellard
2686 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2687 ba863458 bellard
    if (!p)
2688 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2689 ba863458 bellard
    return p->phys_offset;
2690 ba863458 bellard
}
2691 ba863458 bellard
2692 c227f099 Anthony Liguori
/* Ask the hypervisor to coalesce MMIO writes to [addr, addr + size). */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_coalesce_mmio_region(addr, size);
}
2697 f65ed4c1 aliguori
2698 c227f099 Anthony Liguori
/* Stop coalescing MMIO writes to [addr, addr + size). */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_uncoalesce_mmio_region(addr, size);
}
2703 f65ed4c1 aliguori
2704 62a2744c Sheng Yang
/* Drain any MMIO writes the hypervisor has batched up. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2709 62a2744c Sheng Yang
2710 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2711 c902760f Marcelo Tosatti
2712 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2713 c902760f Marcelo Tosatti
2714 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
2715 c902760f Marcelo Tosatti
2716 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
2717 c902760f Marcelo Tosatti
{
2718 c902760f Marcelo Tosatti
    struct statfs fs;
2719 c902760f Marcelo Tosatti
    int ret;
2720 c902760f Marcelo Tosatti
2721 c902760f Marcelo Tosatti
    do {
2722 9742bf26 Yoshiaki Tamura
        ret = statfs(path, &fs);
2723 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);
2724 c902760f Marcelo Tosatti
2725 c902760f Marcelo Tosatti
    if (ret != 0) {
2726 9742bf26 Yoshiaki Tamura
        perror(path);
2727 9742bf26 Yoshiaki Tamura
        return 0;
2728 c902760f Marcelo Tosatti
    }
2729 c902760f Marcelo Tosatti
2730 c902760f Marcelo Tosatti
    if (fs.f_type != HUGETLBFS_MAGIC)
2731 9742bf26 Yoshiaki Tamura
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2732 c902760f Marcelo Tosatti
2733 c902760f Marcelo Tosatti
    return fs.f_bsize;
2734 c902760f Marcelo Tosatti
}
2735 c902760f Marcelo Tosatti
2736 04b16653 Alex Williamson
/* Allocate MEMORY bytes of RAM for BLOCK from a hugetlbfs mount at PATH.
   Returns the mmap'ed area (and stores the backing fd in block->fd),
   or NULL on any failure so the caller can fall back to an ordinary
   allocation. */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* A request smaller than one huge page can't use this backend. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* The mapping keeps the file alive; unlink the name now so the
       storage is reclaimed automatically when the fd is closed. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
2804 c902760f Marcelo Tosatti
#endif
2805 c902760f Marcelo Tosatti
2806 d17b5288 Alex Williamson
/* Best-fit search for a free ram_addr_t range of SIZE bytes among the
   registered RAM blocks: returns the end of the existing block whose
   following gap is the smallest one that still fits SIZE.
   NOTE(review): if no gap fits, this falls through and returns 0,
   which would overlap the first block — callers are presumably expected
   not to exhaust the address space. */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        /* Find the closest block that starts at or after this one's end;
           the space between them is the candidate gap. */
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset =  end;
            mingap = next - end;
        }
    }
    return offset;
}
2831 04b16653 Alex Williamson
2832 04b16653 Alex Williamson
static ram_addr_t last_ram_offset(void)
2833 04b16653 Alex Williamson
{
2834 d17b5288 Alex Williamson
    RAMBlock *block;
2835 d17b5288 Alex Williamson
    ram_addr_t last = 0;
2836 d17b5288 Alex Williamson
2837 d17b5288 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next)
2838 d17b5288 Alex Williamson
        last = MAX(last, block->offset + block->length);
2839 d17b5288 Alex Williamson
2840 d17b5288 Alex Williamson
    return last;
2841 d17b5288 Alex Williamson
}
2842 d17b5288 Alex Williamson
2843 84b89d78 Cam Macdonell
/* Register a new RAM block of SIZE bytes and return its ram_addr_t offset.
   If HOST is non-NULL, the caller supplies (and keeps ownership of) the
   backing memory and the block is marked RAM_PREALLOC_MASK; otherwise the
   memory is allocated here (hugetlbfs-backed when -mem-path is in use).
   DEV and NAME are combined into the block's id string; a duplicate id
   aborts. */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Prefix the id with the device's qdev path when one is available. */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Ids must be unique across all registered blocks. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        /* Caller-provided memory: never freed by qemu_ram_free(). */
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* hugetlbfs failed: fall back to a normal allocation. */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the new range fully dirty. */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2911 e9a1ab19 bellard
2912 6977dfe6 Yoshiaki Tamura
/* Allocate a new RAM block of SIZE bytes with memory allocated by QEMU
   itself (convenience wrapper: qemu_ram_alloc_from_ptr with host=NULL). */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
2916 6977dfe6 Yoshiaki Tamura
2917 c227f099 Anthony Liguori
/* Unregister and release the RAM block whose offset is exactly ADDR.
   The release path mirrors the allocation path: caller-owned
   (RAM_PREALLOC_MASK) memory is left untouched, -mem-path blocks are
   munmap'ed and their fd closed, everything else goes through
   qemu_vfree (or munmap for the s390x KVM case).
   Silently does nothing if no block starts at ADDR. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;  /* memory belongs to the qemu_ram_alloc_from_ptr caller */
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    /* hugetlbfs-backed: unmap and release the fd. */
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    /* file_ram_alloc failed; we fell back to qemu_vmalloc. */
                    qemu_vfree(block->host);
                }
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }

}
2948 e9a1ab19 bellard
2949 cd19cfa2 Huang Ying
#ifndef _WIN32
2950 cd19cfa2 Huang Ying
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2951 cd19cfa2 Huang Ying
{
2952 cd19cfa2 Huang Ying
    RAMBlock *block;
2953 cd19cfa2 Huang Ying
    ram_addr_t offset;
2954 cd19cfa2 Huang Ying
    int flags;
2955 cd19cfa2 Huang Ying
    void *area, *vaddr;
2956 cd19cfa2 Huang Ying
2957 cd19cfa2 Huang Ying
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2958 cd19cfa2 Huang Ying
        offset = addr - block->offset;
2959 cd19cfa2 Huang Ying
        if (offset < block->length) {
2960 cd19cfa2 Huang Ying
            vaddr = block->host + offset;
2961 cd19cfa2 Huang Ying
            if (block->flags & RAM_PREALLOC_MASK) {
2962 cd19cfa2 Huang Ying
                ;
2963 cd19cfa2 Huang Ying
            } else {
2964 cd19cfa2 Huang Ying
                flags = MAP_FIXED;
2965 cd19cfa2 Huang Ying
                munmap(vaddr, length);
2966 cd19cfa2 Huang Ying
                if (mem_path) {
2967 cd19cfa2 Huang Ying
#if defined(__linux__) && !defined(TARGET_S390X)
2968 cd19cfa2 Huang Ying
                    if (block->fd) {
2969 cd19cfa2 Huang Ying
#ifdef MAP_POPULATE
2970 cd19cfa2 Huang Ying
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2971 cd19cfa2 Huang Ying
                            MAP_PRIVATE;
2972 cd19cfa2 Huang Ying
#else
2973 cd19cfa2 Huang Ying
                        flags |= MAP_PRIVATE;
2974 cd19cfa2 Huang Ying
#endif
2975 cd19cfa2 Huang Ying
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2976 cd19cfa2 Huang Ying
                                    flags, block->fd, offset);
2977 cd19cfa2 Huang Ying
                    } else {
2978 cd19cfa2 Huang Ying
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2979 cd19cfa2 Huang Ying
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2980 cd19cfa2 Huang Ying
                                    flags, -1, 0);
2981 cd19cfa2 Huang Ying
                    }
2982 cd19cfa2 Huang Ying
#endif
2983 cd19cfa2 Huang Ying
                } else {
2984 cd19cfa2 Huang Ying
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2985 cd19cfa2 Huang Ying
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
2986 cd19cfa2 Huang Ying
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2987 cd19cfa2 Huang Ying
                                flags, -1, 0);
2988 cd19cfa2 Huang Ying
#else
2989 cd19cfa2 Huang Ying
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2990 cd19cfa2 Huang Ying
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2991 cd19cfa2 Huang Ying
                                flags, -1, 0);
2992 cd19cfa2 Huang Ying
#endif
2993 cd19cfa2 Huang Ying
                }
2994 cd19cfa2 Huang Ying
                if (area != vaddr) {
2995 cd19cfa2 Huang Ying
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
2996 cd19cfa2 Huang Ying
                            length, addr);
2997 cd19cfa2 Huang Ying
                    exit(1);
2998 cd19cfa2 Huang Ying
                }
2999 cd19cfa2 Huang Ying
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3000 cd19cfa2 Huang Ying
            }
3001 cd19cfa2 Huang Ying
            return;
3002 cd19cfa2 Huang Ying
        }
3003 cd19cfa2 Huang Ying
    }
3004 cd19cfa2 Huang Ying
}
3005 cd19cfa2 Huang Ying
#endif /* !_WIN32 */
3006 cd19cfa2 Huang Ying
3007 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
3008 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
3009 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
3010 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
3011 5579c7f3 pbrook

3012 5579c7f3 pbrook
   It should not be used for general purpose DMA.
3013 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3014 5579c7f3 pbrook
 */
3015 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
3016 dc828ca1 pbrook
{
3017 94a6b54f pbrook
    RAMBlock *block;
3018 94a6b54f pbrook
3019 f471a17e Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3020 f471a17e Alex Williamson
        if (addr - block->offset < block->length) {
3021 7d82af38 Vincent Palatin
            /* Move this entry to to start of the list.  */
3022 7d82af38 Vincent Palatin
            if (block != QLIST_FIRST(&ram_list.blocks)) {
3023 7d82af38 Vincent Palatin
                QLIST_REMOVE(block, next);
3024 7d82af38 Vincent Palatin
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3025 7d82af38 Vincent Palatin
            }
3026 f471a17e Alex Williamson
            return block->host + (addr - block->offset);
3027 f471a17e Alex Williamson
        }
3028 94a6b54f pbrook
    }
3029 f471a17e Alex Williamson
3030 f471a17e Alex Williamson
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3031 f471a17e Alex Williamson
    abort();
3032 f471a17e Alex Williamson
3033 f471a17e Alex Williamson
    return NULL;
3034 dc828ca1 pbrook
}
3035 dc828ca1 pbrook
3036 b2e0a138 Michael S. Tsirkin
/* Return a host pointer to ram allocated with qemu_ram_alloc.
3037 b2e0a138 Michael S. Tsirkin
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3038 b2e0a138 Michael S. Tsirkin
 */
3039 b2e0a138 Michael S. Tsirkin
void *qemu_safe_ram_ptr(ram_addr_t addr)
3040 b2e0a138 Michael S. Tsirkin
{
3041 b2e0a138 Michael S. Tsirkin
    RAMBlock *block;
3042 b2e0a138 Michael S. Tsirkin
3043 b2e0a138 Michael S. Tsirkin
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3044 b2e0a138 Michael S. Tsirkin
        if (addr - block->offset < block->length) {
3045 b2e0a138 Michael S. Tsirkin
            return block->host + (addr - block->offset);
3046 b2e0a138 Michael S. Tsirkin
        }
3047 b2e0a138 Michael S. Tsirkin
    }
3048 b2e0a138 Michael S. Tsirkin
3049 b2e0a138 Michael S. Tsirkin
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3050 b2e0a138 Michael S. Tsirkin
    abort();
3051 b2e0a138 Michael S. Tsirkin
3052 b2e0a138 Michael S. Tsirkin
    return NULL;
3053 b2e0a138 Michael S. Tsirkin
}
3054 b2e0a138 Michael S. Tsirkin
3055 e890261f Marcelo Tosatti
/* Translate a host pointer into a ram_addr_t.
 * On success stores the offset in *ram_addr and returns 0; returns -1
 * if the pointer does not fall inside any registered RAMBlock. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* Check the lower bound explicitly: the original code rejected
         * pointers below block->host only via the implicit conversion
         * of a negative ptrdiff_t to a huge unsigned value in the
         * comparison against block->length. */
        if (host >= block->host && host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }
    return -1;
}
3068 f471a17e Alex Williamson
3069 e890261f Marcelo Tosatti
/* Some of the softmmu routines need to translate from a host pointer
3070 e890261f Marcelo Tosatti
   (typically a TLB entry) back to a ram offset.  */
3071 e890261f Marcelo Tosatti
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3072 e890261f Marcelo Tosatti
{
3073 e890261f Marcelo Tosatti
    ram_addr_t ram_addr;
3074 f471a17e Alex Williamson
3075 e890261f Marcelo Tosatti
    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3076 e890261f Marcelo Tosatti
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
3077 e890261f Marcelo Tosatti
        abort();
3078 e890261f Marcelo Tosatti
    }
3079 e890261f Marcelo Tosatti
    return ram_addr;
3080 5579c7f3 pbrook
}
3081 5579c7f3 pbrook
3082 c227f099 Anthony Liguori
/* Byte read from unassigned memory: optionally log, optionally raise a
   target-specific fault (size argument 1), and return 0. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* These targets take an exception on unassigned accesses. */
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
3092 e18231a3 blueswir1
3093 c227f099 Anthony Liguori
/* Word (16-bit) read from unassigned memory; see unassigned_mem_readb. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
3103 e18231a3 blueswir1
3104 c227f099 Anthony Liguori
/* Long (32-bit) read from unassigned memory; see unassigned_mem_readb. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
3114 33417e70 bellard
3115 c227f099 Anthony Liguori
/* Byte write to unassigned memory: optionally log, optionally raise a
   target-specific fault (is_write=1, size 1); the value is discarded. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
3124 e18231a3 blueswir1
3125 c227f099 Anthony Liguori
/* Word (16-bit) write to unassigned memory; see unassigned_mem_writeb. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
3134 e18231a3 blueswir1
3135 c227f099 Anthony Liguori
/* Long (32-bit) write to unassigned memory; see unassigned_mem_writeb. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
3144 33417e70 bellard
3145 d60efc6b Blue Swirl
/* Read dispatch table (byte/word/long) for unassigned memory. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
3150 33417e70 bellard
3151 d60efc6b Blue Swirl
/* Write dispatch table (byte/word/long) for unassigned memory. */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3156 33417e70 bellard
3157 c227f099 Anthony Liguori
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3158 0f459d16 pbrook
                                uint32_t val)
3159 9fa3e853 bellard
{
3160 3a7d929e bellard
    int dirty_flags;
3161 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3162 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3163 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3164 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 1);
3165 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3166 9fa3e853 bellard
#endif
3167 3a7d929e bellard
    }
3168 5579c7f3 pbrook
    stb_p(qemu_get_ram_ptr(ram_addr), val);
3169 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3170 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3171 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3172 f23db169 bellard
       flushed */
3173 f23db169 bellard
    if (dirty_flags == 0xff)
3174 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3175 9fa3e853 bellard
}
3176 9fa3e853 bellard
3177 c227f099 Anthony Liguori
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3178 0f459d16 pbrook
                                uint32_t val)
3179 9fa3e853 bellard
{
3180 3a7d929e bellard
    int dirty_flags;
3181 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3182 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3183 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3184 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 2);
3185 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3186 9fa3e853 bellard
#endif
3187 3a7d929e bellard
    }
3188 5579c7f3 pbrook
    stw_p(qemu_get_ram_ptr(ram_addr), val);
3189 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3190 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3191 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3192 f23db169 bellard
       flushed */
3193 f23db169 bellard
    if (dirty_flags == 0xff)
3194 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3195 9fa3e853 bellard
}
3196 9fa3e853 bellard
3197 c227f099 Anthony Liguori
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3198 0f459d16 pbrook
                                uint32_t val)
3199 9fa3e853 bellard
{
3200 3a7d929e bellard
    int dirty_flags;
3201 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3202 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3203 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3204 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
3205 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3206 9fa3e853 bellard
#endif
3207 3a7d929e bellard
    }
3208 5579c7f3 pbrook
    stl_p(qemu_get_ram_ptr(ram_addr), val);
3209 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3210 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3211 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3212 f23db169 bellard
       flushed */
3213 f23db169 bellard
    if (dirty_flags == 0xff)
3214 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3215 9fa3e853 bellard
}
3216 9fa3e853 bellard
3217 d60efc6b Blue Swirl
/* Placeholder read table for io-mem slots that must never be read
   (e.g. the notdirty slot, which only intercepts writes). */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
3222 9fa3e853 bellard
3223 d60efc6b Blue Swirl
/* Write dispatch table (byte/word/long) for the notdirty io-mem slot. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3228 1ccde1cb bellard
3229 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.
 * "offset" is the offset of the access within the current page (added
 * to env->mem_io_vaddr to recover the full virtual address), "len_mask"
 * masks the low address bits according to the access size, and "flags"
 * selects which watchpoint types apply (BP_MEM_READ / BP_MEM_WRITE). */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                /* Locate and invalidate the TB that performed the
                 * access so it can be regenerated with watchpoint
                 * handling applied. */
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                /* Does not return: restarts execution of the TB. */
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3273 0f459d16 pbrook
3274 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    /* len_mask ~0x0: byte access, all address bits significant. */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
3282 6658ffb8 pbrook
3283 c227f099 Anthony Liguori
/* 16-bit watchpoint-checked read; see watch_mem_readb. */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3288 6658ffb8 pbrook
3289 c227f099 Anthony Liguori
/* 32-bit watchpoint-checked read; see watch_mem_readb. */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3294 6658ffb8 pbrook
3295 c227f099 Anthony Liguori
/* Byte watchpoint-checked write; see watch_mem_readb. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
3301 6658ffb8 pbrook
3302 c227f099 Anthony Liguori
/* 16-bit watchpoint-checked write; see watch_mem_readb. */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
3308 6658ffb8 pbrook
3309 c227f099 Anthony Liguori
/* 32-bit watchpoint-checked write; see watch_mem_readb. */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
3315 6658ffb8 pbrook
3316 d60efc6b Blue Swirl
/* Read dispatch table (byte/word/long) for the watchpoint io-mem slot. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3321 6658ffb8 pbrook
3322 d60efc6b Blue Swirl
/* Write dispatch table (byte/word/long) for the watchpoint io-mem slot. */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
3327 6658ffb8 pbrook
3328 f6405247 Richard Henderson
static inline uint32_t subpage_readlen (subpage_t *mmio,
3329 f6405247 Richard Henderson
                                        target_phys_addr_t addr,
3330 f6405247 Richard Henderson
                                        unsigned int len)
3331 db7b5426 blueswir1
{
3332 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3333 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3334 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3335 db7b5426 blueswir1
           mmio, len, addr, idx);
3336 db7b5426 blueswir1
#endif
3337 db7b5426 blueswir1
3338 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3339 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3340 f6405247 Richard Henderson
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3341 db7b5426 blueswir1
}
3342 db7b5426 blueswir1
3343 c227f099 Anthony Liguori
/* Perform a write of width index "len" (0=byte, 1=word, 2=long) through
   a subpage; mirror image of subpage_readlen. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int slot = SUBPAGE_IDX(addr);
    unsigned int io_index;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, slot, value);
#endif

    addr += mmio->region_offset[slot];
    io_index = mmio->sub_io_index[slot];
    io_mem_write[io_index][len](io_mem_opaque[io_index], addr, value);
}
3356 db7b5426 blueswir1
3357 c227f099 Anthony Liguori
/* Byte read callback for a subpage io-mem slot. */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}
3361 db7b5426 blueswir1
3362 c227f099 Anthony Liguori
/* Byte write callback for a subpage io-mem slot. */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}
3367 db7b5426 blueswir1
3368 c227f099 Anthony Liguori
/* 16-bit read callback for a subpage io-mem slot. */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}
3372 db7b5426 blueswir1
3373 c227f099 Anthony Liguori
/* 16-bit write callback for a subpage io-mem slot. */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}
3378 db7b5426 blueswir1
3379 c227f099 Anthony Liguori
/* 32-bit read callback for a subpage io-mem slot. */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}
3383 db7b5426 blueswir1
3384 f6405247 Richard Henderson
/* 32-bit write callback for a subpage io-mem slot. */
static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3389 db7b5426 blueswir1
3390 d60efc6b Blue Swirl
/* Read dispatch table (byte/word/long) registered for subpages. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3395 db7b5426 blueswir1
3396 d60efc6b Blue Swirl
/* Write dispatch table (byte/word/long) registered for subpages. */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3401 db7b5426 blueswir1
3402 c227f099 Anthony Liguori
/* Route the subpage byte range [start, end] (inclusive, offsets within
 * the page) to io-mem handler "memory", recording "region_offset" to be
 * added to accesses.  RAM is demoted to IO_MEM_UNASSIGNED since plain
 * RAM cannot live inside a subpage.  Returns 0 on success, -1 if the
 * range exceeds the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    /* Cast to long: ram_addr_t does not necessarily match %ld, and a
     * mismatched printf argument is undefined behavior. */
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, (long)memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        memory = IO_MEM_UNASSIGNED;
    }
    /* Convert the io-mem token to a table index. */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3425 db7b5426 blueswir1
3426 f6405247 Richard Henderson
/* Allocate and register a subpage container for the page at "base".
   The whole page is initially routed to "orig_memory" (with
   "region_offset"); *phys receives the io-mem token tagged with
   IO_MEM_SUBPAGE.  Returns the new container. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio = qemu_mallocz(sizeof(subpage_t));
    int io_index;

    mmio->base = base;
    io_index = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                      DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, io_index);
#endif
    *phys = io_index | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3447 db7b5426 blueswir1
3448 88715657 aliguori
static int get_free_io_mem_idx(void)
3449 88715657 aliguori
{
3450 88715657 aliguori
    int i;
3451 88715657 aliguori
3452 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3453 88715657 aliguori
        if (!io_mem_used[i]) {
3454 88715657 aliguori
            io_mem_used[i] = 1;
3455 88715657 aliguori
            return i;
3456 88715657 aliguori
        }
3457 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3458 88715657 aliguori
    return -1;
3459 88715657 aliguori
}
3460 88715657 aliguori
3461 dd310534 Alexander Graf
/*
3462 dd310534 Alexander Graf
 * Usually, devices operate in little endian mode. There are devices out
3463 dd310534 Alexander Graf
 * there that operate in big endian too. Each device gets byte swapped
3464 dd310534 Alexander Graf
 * mmio if plugged onto a CPU that does the other endianness.
3465 dd310534 Alexander Graf
 *
3466 dd310534 Alexander Graf
 * CPU          Device           swap?
3467 dd310534 Alexander Graf
 *
3468 dd310534 Alexander Graf
 * little       little           no
3469 dd310534 Alexander Graf
 * little       big              yes
3470 dd310534 Alexander Graf
 * big          little           yes
3471 dd310534 Alexander Graf
 * big          big              no
3472 dd310534 Alexander Graf
 */
3473 dd310534 Alexander Graf
3474 dd310534 Alexander Graf
/* State for a byte-swapping MMIO wrapper: the device's original
 * read/write callbacks and opaque pointer, saved so the swapendian_*
 * thunks below can forward accesses with the data byte-swapped. */
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];   /* original b/w/l read callbacks */
    CPUWriteMemoryFunc *write[3]; /* original b/w/l write callbacks */
    void *opaque;                 /* original device opaque */
} SwapEndianContainer;
3479 dd310534 Alexander Graf
3480 dd310534 Alexander Graf
/* Forward a byte read to the wrapped device; single bytes need no swap. */
static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    SwapEndianContainer *sec = opaque;

    return sec->read[0](sec->opaque, addr);
}
3487 dd310534 Alexander Graf
3488 dd310534 Alexander Graf
/* Forward a 16-bit read to the wrapped device, byte-swapping the result. */
static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    SwapEndianContainer *sec = opaque;

    return bswap16(sec->read[1](sec->opaque, addr));
}
3495 dd310534 Alexander Graf
3496 dd310534 Alexander Graf
/* Forward a 32-bit read to the wrapped device, byte-swapping the result. */
static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    SwapEndianContainer *sec = opaque;

    return bswap32(sec->read[2](sec->opaque, addr));
}
3503 dd310534 Alexander Graf
3504 dd310534 Alexander Graf
/* Read dispatch table (byte/word/long) for the byte-swapping wrapper. */
static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};
3509 dd310534 Alexander Graf
3510 dd310534 Alexander Graf
/* Forward a byte write to the wrapped device; single bytes need no swap. */
static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *sec = opaque;

    sec->write[0](sec->opaque, addr, val);
}
3516 dd310534 Alexander Graf
3517 dd310534 Alexander Graf
/* Byte-swap a 16-bit value and forward the write to the wrapped device. */
static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *sec = opaque;

    sec->write[1](sec->opaque, addr, bswap16(val));
}
3523 dd310534 Alexander Graf
3524 dd310534 Alexander Graf
/* Byte-swap a 32-bit value and forward the write to the wrapped device. */
static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *sec = opaque;

    sec->write[2](sec->opaque, addr, bswap32(val));
}
3530 dd310534 Alexander Graf
3531 dd310534 Alexander Graf
/* Write dispatch table (byte/word/long) for the byte-swapping wrapper. */
static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};
3536 dd310534 Alexander Graf
3537 dd310534 Alexander Graf
/* Wrap the io-mem slot io_index with byte-swapping accessors: the
   device's original callbacks and opaque are saved in a freshly
   allocated SwapEndianContainer, and the global tables are redirected
   to the swapendian_* trampolines.  Undone by swapendian_del().  */
static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        /* Save the device's handlers for each access size ...  */
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        /* ... and interpose the swapping trampolines.  */
        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    /* The trampolines receive the container, which forwards to the
       device's original opaque.  */
    io_mem_opaque[io_index] = c;
}
3553 dd310534 Alexander Graf
3554 dd310534 Alexander Graf
/* Release the SwapEndianContainer for slot io_index, if one was
   installed.  The slot is detected as wrapped by comparing its byte
   read handler against the trampoline table; io_mem_opaque then points
   at the container allocated by swapendian_init().  The caller
   (cpu_unregister_io_memory) resets the handler tables afterwards.  */
static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}
3560 dd310534 Alexander Graf
3561 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3562 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3563 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3564 3ee89922 blueswir1
   If io_index is non zero, the corresponding io zone is
3565 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3566 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3567 4254fab8 blueswir1
   returned if error. */
3568 1eed09cb Avi Kivity
/* Register (or re-register) an io-mem slot.
 *
 * io_index <= 0 requests a free slot from get_free_io_mem_idx();
 * otherwise io_index is an existing table address and is converted back
 * to a slot number.  NULL entries in mem_read/mem_write fall back to
 * the unassigned-memory handlers.  When the requested device
 * endianness differs from the target's, byte-swapping wrappers are
 * interposed via swapendian_init().
 *
 * Returns the table address (slot << IO_MEM_SHIFT) usable with
 * cpu_register_physical_memory(), or -1 on error.  */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    /* Fill all three access sizes, substituting the unassigned-memory
       handlers for any callback the device did not provide.  */
    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    /* Interpose byte-swapping only when device and target byte order
       disagree; native endianness needs no wrapper.  */
    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}
3613 61382a50 bellard
3614 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3615 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3616 dd310534 Alexander Graf
                           void *opaque, enum device_endian endian)
3617 1eed09cb Avi Kivity
{
3618 2507c12a Alexander Graf
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
3619 1eed09cb Avi Kivity
}
3620 1eed09cb Avi Kivity
3621 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3622 88715657 aliguori
{
3623 88715657 aliguori
    int i;
3624 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3625 88715657 aliguori
3626 dd310534 Alexander Graf
    swapendian_del(io_index);
3627 dd310534 Alexander Graf
3628 88715657 aliguori
    for (i=0;i < 3; i++) {
3629 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3630 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3631 88715657 aliguori
    }
3632 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3633 88715657 aliguori
    io_mem_used[io_index] = 0;
3634 88715657 aliguori
}
3635 88715657 aliguori
3636 e9179ce1 Avi Kivity
/* One-time setup of the fixed io-mem slots (ROM, unassigned, notdirty)
   and the watchpoint handler.  */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    /* Reserve the first five slots so dynamic registration never hands
       them out (NOTE(review): 5 looks tied to the fixed IO_MEM_*
       indices -- confirm against their definitions).  */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
3656 e9179ce1 Avi Kivity
3657 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3658 e2eef170 pbrook
3659 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3660 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3661 a68fe89c Paul Brook
/* Debugger access to guest memory (user-mode emulation variant).
 * Copies len bytes between buf and guest virtual address addr, one
 * page-bounded chunk at a time, honouring the page protection flags.
 * Returns 0 on success, -1 if any page is invalid or lacks the needed
 * read/write permission.  */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        /* Clamp each chunk to the end of the current guest page.  */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3699 8df1cd07 bellard
3700 13eb76e0 bellard
#else
3701 c227f099 Anthony Liguori
/* Copy len bytes between buf and guest physical memory at addr,
 * splitting the transfer at page boundaries.  RAM pages are accessed
 * via memcpy; MMIO pages go through the registered io_mem handlers
 * using the widest naturally-aligned access (4, 2, or 1 bytes).  RAM
 * writes invalidate any translated code on the page and update the
 * dirty bitmap.  */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Clamp each chunk to the end of the current physical page.  */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write: pick the widest access the alignment and
                   remaining length allow.  */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3797 8df1cd07 bellard
3798 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
/* Like a physical-memory write, but also allowed on ROM/ROMD pages.
 * Pages that are neither RAM, ROM, nor ROMD are silently skipped; no
 * dirty tracking or code invalidation is performed.  */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Clamp each chunk to the end of the current physical page.  */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3836 d0ecd2aa bellard
3837 6d16c2f8 aliguori
/* Temporary buffer used by cpu_physical_memory_map() when the target
   region is not directly-addressable RAM.  Only one bounce buffer
   exists; buffer == NULL means it is free.  */
typedef struct {
    void *buffer;
    target_phys_addr_t addr;    /* guest physical address covered */
    target_phys_addr_t len;     /* number of bytes covered */
} BounceBuffer;

static BounceBuffer bounce;

/* A client waiting to be notified when the bounce buffer becomes free
   again (see cpu_register_map_client / cpu_notify_map_clients).  */
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3853 ba223c29 aliguori
3854 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3855 ba223c29 aliguori
{
3856 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3857 ba223c29 aliguori
3858 ba223c29 aliguori
    client->opaque = opaque;
3859 ba223c29 aliguori
    client->callback = callback;
3860 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3861 ba223c29 aliguori
    return client;
3862 ba223c29 aliguori
}
3863 ba223c29 aliguori
3864 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3865 ba223c29 aliguori
{
3866 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3867 ba223c29 aliguori
3868 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3869 34d5e948 Isaku Yamahata
    qemu_free(client);
3870 ba223c29 aliguori
}
3871 ba223c29 aliguori
3872 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3873 ba223c29 aliguori
{
3874 ba223c29 aliguori
    MapClient *client;
3875 ba223c29 aliguori
3876 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3877 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3878 ba223c29 aliguori
        client->callback(client->opaque);
3879 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3880 ba223c29 aliguori
    }
3881 ba223c29 aliguori
}
3882 ba223c29 aliguori
3883 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;    /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;            /* host address of the mapping start */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* Walk the range one guest page at a time.  */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Non-RAM page: only the single, global bounce buffer can
               back it, and only at the start of a mapping.  */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the bounce buffer for read mappings.  */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Pages are not host-contiguous: stop and return the
               contiguous prefix.  */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3944 6d16c2f8 aliguori
3945 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: no copy-back needed, just dirty tracking
           and code invalidation for written pages.  */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush written data back to the guest,
       release the buffer and wake up anyone waiting to map.  */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
3980 d0ecd2aa bellard
3981 8df1cd07 bellard
/* warning: addr must be aligned */
/* Load a 32-bit value from guest physical memory.  RAM/ROMD pages are
   read directly; other pages go through the slot's 32-bit io_mem read
   handler (with region_offset applied).  */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
4012 8df1cd07 bellard
4013 84b7b8e7 bellard
/* warning: addr must be aligned */
/* Load a 64-bit value from guest physical memory.  For I/O pages the
   access is split into two 32-bit reads, ordered according to the
   target's byte order.  */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: two 32-bit accesses, most-significant half first on
           big-endian targets.  */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
4050 84b7b8e7 bellard
4051 aab33094 bellard
/* XXX: optimize */
4052 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
4053 aab33094 bellard
{
4054 aab33094 bellard
    uint8_t val;
4055 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
4056 aab33094 bellard
    return val;
4057 aab33094 bellard
}
4058 aab33094 bellard
4059 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
4060 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
4061 aab33094 bellard
{
4062 733f0b02 Michael S. Tsirkin
    int io_index;
4063 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
4064 733f0b02 Michael S. Tsirkin
    uint64_t val;
4065 733f0b02 Michael S. Tsirkin
    unsigned long pd;
4066 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
4067 733f0b02 Michael S. Tsirkin
4068 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4069 733f0b02 Michael S. Tsirkin
    if (!p) {
4070 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
4071 733f0b02 Michael S. Tsirkin
    } else {
4072 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
4073 733f0b02 Michael S. Tsirkin
    }
4074 733f0b02 Michael S. Tsirkin
4075 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4076 733f0b02 Michael S. Tsirkin
        !(pd & IO_MEM_ROMD)) {
4077 733f0b02 Michael S. Tsirkin
        /* I/O case */
4078 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4079 733f0b02 Michael S. Tsirkin
        if (p)
4080 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4081 733f0b02 Michael S. Tsirkin
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4082 733f0b02 Michael S. Tsirkin
    } else {
4083 733f0b02 Michael S. Tsirkin
        /* RAM case */
4084 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4085 733f0b02 Michael S. Tsirkin
            (addr & ~TARGET_PAGE_MASK);
4086 733f0b02 Michael S. Tsirkin
        val = lduw_p(ptr);
4087 733f0b02 Michael S. Tsirkin
    }
4088 733f0b02 Michael S. Tsirkin
    return val;
4089 aab33094 bellard
}
4090 aab33094 bellard
4091 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Non-RAM page: forward to the slot's 32-bit write handler.  */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration the dirty log must still see the change, so
           the usual "notdirty" shortcut is bypassed.  */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4129 8df1cd07 bellard
4130 c227f099 Anthony Liguori
/* Store a 64-bit value to guest physical memory WITHOUT setting the
   dirty bits — used for 64-bit PTE updates (e.g. PAE), where only the
   PTE's own accessed/dirty bits should be tracked.
   Fix: mirror stl_phys_notdirty and flag the page dirty while a live
   migration is in progress; previously 64-bit not-dirty stores were
   invisible to the migration dirty bitmap, so the modified page would
   not be re-sent to the destination. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* There are no 64-bit MMIO handlers: split the access into two
           32-bit writes in guest byte order. */
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stq_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 8, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4161 bc98a7ef j_mayer
4162 8df1cd07 bellard
/* warning: addr must be aligned */
4163 c227f099 Anthony Liguori
void stl_phys(target_phys_addr_t addr, uint32_t val)
4164 8df1cd07 bellard
{
4165 8df1cd07 bellard
    int io_index;
4166 8df1cd07 bellard
    uint8_t *ptr;
4167 8df1cd07 bellard
    unsigned long pd;
4168 8df1cd07 bellard
    PhysPageDesc *p;
4169 8df1cd07 bellard
4170 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4171 8df1cd07 bellard
    if (!p) {
4172 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
4173 8df1cd07 bellard
    } else {
4174 8df1cd07 bellard
        pd = p->phys_offset;
4175 8df1cd07 bellard
    }
4176 3b46e624 ths
4177 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4178 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4179 8da3ff18 pbrook
        if (p)
4180 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4181 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4182 8df1cd07 bellard
    } else {
4183 8df1cd07 bellard
        unsigned long addr1;
4184 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4185 8df1cd07 bellard
        /* RAM case */
4186 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
4187 8df1cd07 bellard
        stl_p(ptr, val);
4188 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
4189 3a7d929e bellard
            /* invalidate code */
4190 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4191 3a7d929e bellard
            /* set dirty bit */
4192 f7c11b53 Yoshiaki Tamura
            cpu_physical_memory_set_dirty_flags(addr1,
4193 f7c11b53 Yoshiaki Tamura
                (0xff & ~CODE_DIRTY_FLAG));
4194 3a7d929e bellard
        }
4195 8df1cd07 bellard
    }
4196 8df1cd07 bellard
}
4197 8df1cd07 bellard
4198 aab33094 bellard
/* XXX: optimize */
4199 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
4200 aab33094 bellard
{
4201 aab33094 bellard
    uint8_t v = val;
4202 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
4203 aab33094 bellard
}
4204 aab33094 bellard
4205 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
4206 c227f099 Anthony Liguori
void stw_phys(target_phys_addr_t addr, uint32_t val)
4207 aab33094 bellard
{
4208 733f0b02 Michael S. Tsirkin
    int io_index;
4209 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
4210 733f0b02 Michael S. Tsirkin
    unsigned long pd;
4211 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
4212 733f0b02 Michael S. Tsirkin
4213 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4214 733f0b02 Michael S. Tsirkin
    if (!p) {
4215 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
4216 733f0b02 Michael S. Tsirkin
    } else {
4217 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
4218 733f0b02 Michael S. Tsirkin
    }
4219 733f0b02 Michael S. Tsirkin
4220 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4221 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4222 733f0b02 Michael S. Tsirkin
        if (p)
4223 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4224 733f0b02 Michael S. Tsirkin
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4225 733f0b02 Michael S. Tsirkin
    } else {
4226 733f0b02 Michael S. Tsirkin
        unsigned long addr1;
4227 733f0b02 Michael S. Tsirkin
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4228 733f0b02 Michael S. Tsirkin
        /* RAM case */
4229 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(addr1);
4230 733f0b02 Michael S. Tsirkin
        stw_p(ptr, val);
4231 733f0b02 Michael S. Tsirkin
        if (!cpu_physical_memory_is_dirty(addr1)) {
4232 733f0b02 Michael S. Tsirkin
            /* invalidate code */
4233 733f0b02 Michael S. Tsirkin
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4234 733f0b02 Michael S. Tsirkin
            /* set dirty bit */
4235 733f0b02 Michael S. Tsirkin
            cpu_physical_memory_set_dirty_flags(addr1,
4236 733f0b02 Michael S. Tsirkin
                (0xff & ~CODE_DIRTY_FLAG));
4237 733f0b02 Michael S. Tsirkin
        }
4238 733f0b02 Michael S. Tsirkin
    }
4239 aab33094 bellard
}
4240 aab33094 bellard
4241 aab33094 bellard
/* XXX: optimize */
4242 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
4243 aab33094 bellard
{
4244 aab33094 bellard
    val = tswap64(val);
4245 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4246 aab33094 bellard
}
4247 aab33094 bellard
4248 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
4249 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4250 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
4251 13eb76e0 bellard
{
4252 13eb76e0 bellard
    int l;
4253 c227f099 Anthony Liguori
    target_phys_addr_t phys_addr;
4254 9b3c35e0 j_mayer
    target_ulong page;
4255 13eb76e0 bellard
4256 13eb76e0 bellard
    while (len > 0) {
4257 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
4258 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
4259 13eb76e0 bellard
        /* if no physical page mapped, return an error */
4260 13eb76e0 bellard
        if (phys_addr == -1)
4261 13eb76e0 bellard
            return -1;
4262 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
4263 13eb76e0 bellard
        if (l > len)
4264 13eb76e0 bellard
            l = len;
4265 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
4266 5e2972fd aliguori
        if (is_write)
4267 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
4268 5e2972fd aliguori
        else
4269 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4270 13eb76e0 bellard
        len -= l;
4271 13eb76e0 bellard
        buf += l;
4272 13eb76e0 bellard
        addr += l;
4273 13eb76e0 bellard
    }
4274 13eb76e0 bellard
    return 0;
4275 13eb76e0 bellard
}
4276 a68fe89c Paul Brook
#endif
4277 13eb76e0 bellard
4278 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB.
   Called from an I/O handler when that invariant was violated:
   retranslates the current TB so it ends on the I/O instruction, then
   restarts execution from guest state.  Does not return. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Map the host return address of the I/O access back to its TB. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* Roll the CPU state back to the faulting guest instruction. */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO makes the retranslated TB stop right after insn n,
       i.e. the TB now ends on the I/O instruction as required. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    /* Discard the old TB before generating its replacement. */
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
4336 2e70f6ef pbrook
4337 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
4338 b3755a91 Paul Brook
4339 055403b2 Stefan Weil
/* Dump translation-buffer and TB statistics to F via CPU_FPRINTF
   (backs the monitor's "info jit" command). */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* Walk every live TB and accumulate size / linkage statistics. */
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* A TB spanning two guest pages has a second page_addr entry. */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* tb_next_offset != 0xffff means the exit jump was patched to
           chain directly into another TB. */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n", 
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
4390 e3db7226 bellard
4391 61382a50 bellard
/* Instantiate the code-access softmmu helpers by repeatedly including
   the template with increasing access sizes.  MMUSUFFIX _cmmu gives
   the generated helpers distinct names from the data-access variants,
   GETPC() is NULL because these fetches never need to unwind guest
   state, and SOFTMMU_CODE_ACCESS selects the instruction-fetch paths.
   env is temporarily aliased to cpu_single_env for the template. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0  /* 1 << 0 = 1-byte accesses */
#include "softmmu_template.h"

#define SHIFT 1  /* 2-byte accesses */
#include "softmmu_template.h"

#define SHIFT 2  /* 4-byte accesses */
#include "softmmu_template.h"

#define SHIFT 3  /* 8-byte accesses */
#include "softmmu_template.h"

#undef env

#endif