/* Scraped annotate-view header, retained for provenance:
   root / exec.c @ 5ee8ad71 (History | View | Annotate | Download, 129.5 kB) */

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
27 055403b2 Stefan Weil
#include "qemu-common.h"
28 6180a181 bellard
#include "cpu.h"
29 6180a181 bellard
#include "exec-all.h"
30 b67d9a52 bellard
#include "tcg.h"
31 b3c7724c pbrook
#include "hw/hw.h"
32 cc9e98cb Alex Williamson
#include "hw/qdev.h"
33 74576198 aliguori
#include "osdep.h"
34 7ba1e619 aliguori
#include "kvm.h"
35 29e922b6 Blue Swirl
#include "qemu-timer.h"
36 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
37 53a5960a pbrook
#include <qemu.h>
38 fd052bf6 Riku Voipio
#include <signal.h>
39 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40 f01576f1 Juergen Lock
#include <sys/param.h>
41 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
42 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
43 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
44 f01576f1 Juergen Lock
#include <sys/time.h>
45 f01576f1 Juergen Lock
#include <sys/proc.h>
46 f01576f1 Juergen Lock
#include <machine/profile.h>
47 f01576f1 Juergen Lock
#define _KERNEL
48 f01576f1 Juergen Lock
#include <sys/user.h>
49 f01576f1 Juergen Lock
#undef _KERNEL
50 f01576f1 Juergen Lock
#undef sigqueue
51 f01576f1 Juergen Lock
#include <libutil.h>
52 f01576f1 Juergen Lock
#endif
53 f01576f1 Juergen Lock
#endif
54 53a5960a pbrook
#endif
55 54936004 bellard
56 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
57 66e85a21 bellard
//#define DEBUG_FLUSH
58 9fa3e853 bellard
//#define DEBUG_TLB
59 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
60 fd6ce8f6 bellard
61 fd6ce8f6 bellard
/* make various TB consistency checks */
62 5fafdf24 ths
//#define DEBUG_TB_CHECK
63 5fafdf24 ths
//#define DEBUG_TLB_CHECK
64 fd6ce8f6 bellard
65 1196be37 ths
//#define DEBUG_IOPORT
66 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
67 1196be37 ths
68 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
69 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
70 99773bd4 pbrook
#undef DEBUG_TB_CHECK
71 99773bd4 pbrook
#endif
72 99773bd4 pbrook
73 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
74 9fa3e853 bellard
75 bdaf78e0 blueswir1
static TranslationBlock *tbs;
76 24ab68ac Stefan Weil
static int code_gen_max_blocks;
77 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
78 bdaf78e0 blueswir1
static int nb_tbs;
79 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
80 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
81 fd6ce8f6 bellard
82 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
83 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
84 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
85 d03d860b blueswir1
 section close to code segment. */
86 d03d860b blueswir1
#define code_gen_section                                \
87 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
88 d03d860b blueswir1
    __attribute__((aligned (32)))
89 f8e2af11 Stefan Weil
#elif defined(_WIN32)
90 f8e2af11 Stefan Weil
/* Maximum alignment for Win32 is 16. */
91 f8e2af11 Stefan Weil
#define code_gen_section                                \
92 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
93 d03d860b blueswir1
#else
94 d03d860b blueswir1
#define code_gen_section                                \
95 d03d860b blueswir1
    __attribute__((aligned (32)))
96 d03d860b blueswir1
#endif
97 d03d860b blueswir1
98 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
99 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
100 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
101 26a5f13b bellard
/* threshold to flush the translated code buffer */
102 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
103 24ab68ac Stefan Weil
static uint8_t *code_gen_ptr;
104 fd6ce8f6 bellard
105 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
106 9fa3e853 bellard
int phys_ram_fd;
107 74576198 aliguori
static int in_migration;
108 94a6b54f pbrook
109 f471a17e Alex Williamson
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
110 e2eef170 pbrook
#endif
111 9fa3e853 bellard
112 6a00d601 bellard
CPUState *first_cpu;
113 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
114 6a00d601 bellard
   cpu_exec() */
115 5fafdf24 ths
CPUState *cpu_single_env;
116 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
117 bf20dc07 ths
   1 = Precise instruction counting.
118 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
119 2e70f6ef pbrook
int use_icount = 0;
120 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
121 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
122 2e70f6ef pbrook
int64_t qemu_icount;
123 6a00d601 bellard
124 54936004 bellard
typedef struct PageDesc {
125 92e873b9 bellard
    /* list of TBs intersecting this ram page */
126 fd6ce8f6 bellard
    TranslationBlock *first_tb;
127 9fa3e853 bellard
    /* in order to optimize self modifying code, we count the number
128 9fa3e853 bellard
       of lookups we do to a given page to use a bitmap */
129 9fa3e853 bellard
    unsigned int code_write_count;
130 9fa3e853 bellard
    uint8_t *code_bitmap;
131 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
132 9fa3e853 bellard
    unsigned long flags;
133 9fa3e853 bellard
#endif
134 54936004 bellard
} PageDesc;
135 54936004 bellard
136 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
137 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
138 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
139 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
140 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
141 41c1b1c9 Paul Brook
#else
142 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
143 41c1b1c9 Paul Brook
#endif
144 bedb69ea j_mayer
#else
145 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
146 bedb69ea j_mayer
#endif
147 54936004 bellard
148 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
149 5cd2c5b6 Richard Henderson
#define L2_BITS 10
150 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
151 54936004 bellard
152 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
153 5cd2c5b6 Richard Henderson
#define P_L1_BITS_REM \
154 5cd2c5b6 Richard Henderson
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
155 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
156 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157 5cd2c5b6 Richard Henderson
158 5cd2c5b6 Richard Henderson
/* Size of the L1 page table.  Avoid silly small sizes.  */
159 5cd2c5b6 Richard Henderson
#if P_L1_BITS_REM < 4
160 5cd2c5b6 Richard Henderson
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
161 5cd2c5b6 Richard Henderson
#else
162 5cd2c5b6 Richard Henderson
#define P_L1_BITS  P_L1_BITS_REM
163 5cd2c5b6 Richard Henderson
#endif
164 5cd2c5b6 Richard Henderson
165 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
166 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
167 5cd2c5b6 Richard Henderson
#else
168 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
169 5cd2c5b6 Richard Henderson
#endif
170 5cd2c5b6 Richard Henderson
171 5cd2c5b6 Richard Henderson
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
172 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
173 5cd2c5b6 Richard Henderson
174 5cd2c5b6 Richard Henderson
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
175 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
176 5cd2c5b6 Richard Henderson
177 83fb7adf bellard
unsigned long qemu_real_host_page_size;
178 83fb7adf bellard
unsigned long qemu_host_page_bits;
179 83fb7adf bellard
unsigned long qemu_host_page_size;
180 83fb7adf bellard
unsigned long qemu_host_page_mask;
181 54936004 bellard
182 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
183 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
184 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
185 54936004 bellard
186 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
187 41c1b1c9 Paul Brook
typedef struct PhysPageDesc {
188 41c1b1c9 Paul Brook
    /* offset in host memory of the page + io_index in the low bits */
189 41c1b1c9 Paul Brook
    ram_addr_t phys_offset;
190 41c1b1c9 Paul Brook
    ram_addr_t region_offset;
191 41c1b1c9 Paul Brook
} PhysPageDesc;
192 41c1b1c9 Paul Brook
193 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
194 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
195 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
196 6d9a1304 Paul Brook
197 e2eef170 pbrook
static void io_mem_init(void);
198 e2eef170 pbrook
199 33417e70 bellard
/* io memory support */
200 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
201 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
202 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
203 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
204 6658ffb8 pbrook
static int io_mem_watch;
205 6658ffb8 pbrook
#endif
206 33417e70 bellard
207 34865134 bellard
/* log support */
208 1e8b27ca Juha Riihimรคki
#ifdef WIN32
209 1e8b27ca Juha Riihimรคki
static const char *logfilename = "qemu.log";
210 1e8b27ca Juha Riihimรคki
#else
211 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
212 1e8b27ca Juha Riihimรคki
#endif
213 34865134 bellard
FILE *logfile;
214 34865134 bellard
int loglevel;
215 e735b91c pbrook
static int log_append = 0;
216 34865134 bellard
217 e3db7226 bellard
/* statistics */
218 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
219 e3db7226 bellard
static int tlb_flush_count;
220 b3755a91 Paul Brook
#endif
221 e3db7226 bellard
static int tb_flush_count;
222 e3db7226 bellard
static int tb_phys_invalidate_count;
223 e3db7226 bellard
224 7cb69cae bellard
#ifdef _WIN32
225 7cb69cae bellard
static void map_exec(void *addr, long size)
226 7cb69cae bellard
{
227 7cb69cae bellard
    DWORD old_protect;
228 7cb69cae bellard
    VirtualProtect(addr, size,
229 7cb69cae bellard
                   PAGE_EXECUTE_READWRITE, &old_protect);
230 7cb69cae bellard
    
231 7cb69cae bellard
}
232 7cb69cae bellard
#else
233 7cb69cae bellard
static void map_exec(void *addr, long size)
234 7cb69cae bellard
{
235 4369415f bellard
    unsigned long start, end, page_size;
236 7cb69cae bellard
    
237 4369415f bellard
    page_size = getpagesize();
238 7cb69cae bellard
    start = (unsigned long)addr;
239 4369415f bellard
    start &= ~(page_size - 1);
240 7cb69cae bellard
    
241 7cb69cae bellard
    end = (unsigned long)addr + size;
242 4369415f bellard
    end += page_size - 1;
243 4369415f bellard
    end &= ~(page_size - 1);
244 7cb69cae bellard
    
245 7cb69cae bellard
    mprotect((void *)start, end - start,
246 7cb69cae bellard
             PROT_READ | PROT_WRITE | PROT_EXEC);
247 7cb69cae bellard
}
248 7cb69cae bellard
#endif
249 7cb69cae bellard
250 b346ff46 bellard
static void page_init(void)
251 54936004 bellard
{
252 83fb7adf bellard
    /* NOTE: we can always suppose that qemu_host_page_size >=
253 54936004 bellard
       TARGET_PAGE_SIZE */
254 c2b48b69 aliguori
#ifdef _WIN32
255 c2b48b69 aliguori
    {
256 c2b48b69 aliguori
        SYSTEM_INFO system_info;
257 c2b48b69 aliguori
258 c2b48b69 aliguori
        GetSystemInfo(&system_info);
259 c2b48b69 aliguori
        qemu_real_host_page_size = system_info.dwPageSize;
260 c2b48b69 aliguori
    }
261 c2b48b69 aliguori
#else
262 c2b48b69 aliguori
    qemu_real_host_page_size = getpagesize();
263 c2b48b69 aliguori
#endif
264 83fb7adf bellard
    if (qemu_host_page_size == 0)
265 83fb7adf bellard
        qemu_host_page_size = qemu_real_host_page_size;
266 83fb7adf bellard
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
267 83fb7adf bellard
        qemu_host_page_size = TARGET_PAGE_SIZE;
268 83fb7adf bellard
    qemu_host_page_bits = 0;
269 83fb7adf bellard
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
270 83fb7adf bellard
        qemu_host_page_bits++;
271 83fb7adf bellard
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
272 50a9569b balrog
273 2e9a5713 Paul Brook
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
274 50a9569b balrog
    {
275 f01576f1 Juergen Lock
#ifdef HAVE_KINFO_GETVMMAP
276 f01576f1 Juergen Lock
        struct kinfo_vmentry *freep;
277 f01576f1 Juergen Lock
        int i, cnt;
278 f01576f1 Juergen Lock
279 f01576f1 Juergen Lock
        freep = kinfo_getvmmap(getpid(), &cnt);
280 f01576f1 Juergen Lock
        if (freep) {
281 f01576f1 Juergen Lock
            mmap_lock();
282 f01576f1 Juergen Lock
            for (i = 0; i < cnt; i++) {
283 f01576f1 Juergen Lock
                unsigned long startaddr, endaddr;
284 f01576f1 Juergen Lock
285 f01576f1 Juergen Lock
                startaddr = freep[i].kve_start;
286 f01576f1 Juergen Lock
                endaddr = freep[i].kve_end;
287 f01576f1 Juergen Lock
                if (h2g_valid(startaddr)) {
288 f01576f1 Juergen Lock
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
289 f01576f1 Juergen Lock
290 f01576f1 Juergen Lock
                    if (h2g_valid(endaddr)) {
291 f01576f1 Juergen Lock
                        endaddr = h2g(endaddr);
292 fd436907 Aurelien Jarno
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
293 f01576f1 Juergen Lock
                    } else {
294 f01576f1 Juergen Lock
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
295 f01576f1 Juergen Lock
                        endaddr = ~0ul;
296 fd436907 Aurelien Jarno
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
297 f01576f1 Juergen Lock
#endif
298 f01576f1 Juergen Lock
                    }
299 f01576f1 Juergen Lock
                }
300 f01576f1 Juergen Lock
            }
301 f01576f1 Juergen Lock
            free(freep);
302 f01576f1 Juergen Lock
            mmap_unlock();
303 f01576f1 Juergen Lock
        }
304 f01576f1 Juergen Lock
#else
305 50a9569b balrog
        FILE *f;
306 50a9569b balrog
307 0776590d pbrook
        last_brk = (unsigned long)sbrk(0);
308 5cd2c5b6 Richard Henderson
309 fd436907 Aurelien Jarno
        f = fopen("/compat/linux/proc/self/maps", "r");
310 50a9569b balrog
        if (f) {
311 5cd2c5b6 Richard Henderson
            mmap_lock();
312 5cd2c5b6 Richard Henderson
313 50a9569b balrog
            do {
314 5cd2c5b6 Richard Henderson
                unsigned long startaddr, endaddr;
315 5cd2c5b6 Richard Henderson
                int n;
316 5cd2c5b6 Richard Henderson
317 5cd2c5b6 Richard Henderson
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
318 5cd2c5b6 Richard Henderson
319 5cd2c5b6 Richard Henderson
                if (n == 2 && h2g_valid(startaddr)) {
320 5cd2c5b6 Richard Henderson
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
321 5cd2c5b6 Richard Henderson
322 5cd2c5b6 Richard Henderson
                    if (h2g_valid(endaddr)) {
323 5cd2c5b6 Richard Henderson
                        endaddr = h2g(endaddr);
324 5cd2c5b6 Richard Henderson
                    } else {
325 5cd2c5b6 Richard Henderson
                        endaddr = ~0ul;
326 5cd2c5b6 Richard Henderson
                    }
327 5cd2c5b6 Richard Henderson
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
328 50a9569b balrog
                }
329 50a9569b balrog
            } while (!feof(f));
330 5cd2c5b6 Richard Henderson
331 50a9569b balrog
            fclose(f);
332 5cd2c5b6 Richard Henderson
            mmap_unlock();
333 50a9569b balrog
        }
334 f01576f1 Juergen Lock
#endif
335 50a9569b balrog
    }
336 50a9569b balrog
#endif
337 54936004 bellard
}
338 54936004 bellard
339 41c1b1c9 Paul Brook
/* Look up (and optionally create) the PageDesc for a guest page.
   @index: page index (guest address >> TARGET_PAGE_BITS)
   @alloc: when non-zero, allocate missing intermediate tables.
   Returns NULL when the entry is absent and @alloc is zero. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    void **lp;
    PageDesc *pd;
    int level;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Walk levels 2..N-1, allocating tables on demand.  */
    for (level = V_L1_SHIFT / L2_BITS - 1; level > 0; level--) {
        void **table = *lp;

        if (table == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(table, sizeof(void *) * L2_SIZE);
            *lp = table;
        }
        lp = table + ((index >> (level * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: the PageDesc leaf itself.  */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
388 54936004 bellard
389 41c1b1c9 Paul Brook
/* Non-allocating lookup: return the PageDesc for @index or NULL. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
393 fd6ce8f6 bellard
394 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
395 c227f099 Anthony Liguori
/* Look up (and optionally create) the PhysPageDesc for a physical page.
   @index: physical page index (address >> TARGET_PAGE_BITS)
   @alloc: when non-zero, allocate missing intermediate tables.
   Returns NULL when the entry is absent and @alloc is zero.
   Newly created leaf entries default to IO_MEM_UNASSIGNED. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        target_phys_addr_t base;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        /* The new leaf covers a naturally aligned block of L2_SIZE pages,
           so its base index is @index with the low L2 bits cleared.
           Fix: the previous code used (index + i), which produced wrong
           default region_offsets whenever the leaf was first touched via
           a page other than its first one. */
        base = index & ~((target_phys_addr_t)L2_SIZE - 1);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (base + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
434 92e873b9 bellard
435 c227f099 Anthony Liguori
/* Non-allocating lookup: return the PhysPageDesc for @index or NULL. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
439 92e873b9 bellard
440 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
441 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
442 3a7d929e bellard
                                    target_ulong vaddr);
443 c8a706fe pbrook
#define mmap_lock() do { } while(0)
444 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
445 9fa3e853 bellard
#endif
446 fd6ce8f6 bellard
447 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
448 4369415f bellard
449 4369415f bellard
#if defined(CONFIG_USER_ONLY)
450 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
451 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
452 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
453 4369415f bellard
#endif
454 4369415f bellard
455 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
456 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
457 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
458 4369415f bellard
#endif
459 4369415f bellard
460 8fcd3692 blueswir1
static void code_gen_alloc(unsigned long tb_size)
461 26a5f13b bellard
{
462 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
463 4369415f bellard
    code_gen_buffer = static_code_gen_buffer;
464 4369415f bellard
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
465 4369415f bellard
    map_exec(code_gen_buffer, code_gen_buffer_size);
466 4369415f bellard
#else
467 26a5f13b bellard
    code_gen_buffer_size = tb_size;
468 26a5f13b bellard
    if (code_gen_buffer_size == 0) {
469 4369415f bellard
#if defined(CONFIG_USER_ONLY)
470 4369415f bellard
        /* in user mode, phys_ram_size is not meaningful */
471 4369415f bellard
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
472 4369415f bellard
#else
473 ccbb4d44 Stuart Brady
        /* XXX: needs adjustments */
474 94a6b54f pbrook
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
475 4369415f bellard
#endif
476 26a5f13b bellard
    }
477 26a5f13b bellard
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
478 26a5f13b bellard
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
479 26a5f13b bellard
    /* The code gen buffer location may have constraints depending on
480 26a5f13b bellard
       the host cpu and OS */
481 26a5f13b bellard
#if defined(__linux__) 
482 26a5f13b bellard
    {
483 26a5f13b bellard
        int flags;
484 141ac468 blueswir1
        void *start = NULL;
485 141ac468 blueswir1
486 26a5f13b bellard
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
487 26a5f13b bellard
#if defined(__x86_64__)
488 26a5f13b bellard
        flags |= MAP_32BIT;
489 26a5f13b bellard
        /* Cannot map more than that */
490 26a5f13b bellard
        if (code_gen_buffer_size > (800 * 1024 * 1024))
491 26a5f13b bellard
            code_gen_buffer_size = (800 * 1024 * 1024);
492 141ac468 blueswir1
#elif defined(__sparc_v9__)
493 141ac468 blueswir1
        // Map the buffer below 2G, so we can use direct calls and branches
494 141ac468 blueswir1
        flags |= MAP_FIXED;
495 141ac468 blueswir1
        start = (void *) 0x60000000UL;
496 141ac468 blueswir1
        if (code_gen_buffer_size > (512 * 1024 * 1024))
497 141ac468 blueswir1
            code_gen_buffer_size = (512 * 1024 * 1024);
498 1cb0661e balrog
#elif defined(__arm__)
499 63d41246 balrog
        /* Map the buffer below 32M, so we can use direct calls and branches */
500 1cb0661e balrog
        flags |= MAP_FIXED;
501 1cb0661e balrog
        start = (void *) 0x01000000UL;
502 1cb0661e balrog
        if (code_gen_buffer_size > 16 * 1024 * 1024)
503 1cb0661e balrog
            code_gen_buffer_size = 16 * 1024 * 1024;
504 eba0b893 Richard Henderson
#elif defined(__s390x__)
505 eba0b893 Richard Henderson
        /* Map the buffer so that we can use direct calls and branches.  */
506 eba0b893 Richard Henderson
        /* We have a +- 4GB range on the branches; leave some slop.  */
507 eba0b893 Richard Henderson
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
508 eba0b893 Richard Henderson
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
509 eba0b893 Richard Henderson
        }
510 eba0b893 Richard Henderson
        start = (void *)0x90000000UL;
511 26a5f13b bellard
#endif
512 141ac468 blueswir1
        code_gen_buffer = mmap(start, code_gen_buffer_size,
513 141ac468 blueswir1
                               PROT_WRITE | PROT_READ | PROT_EXEC,
514 26a5f13b bellard
                               flags, -1, 0);
515 26a5f13b bellard
        if (code_gen_buffer == MAP_FAILED) {
516 26a5f13b bellard
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
517 26a5f13b bellard
            exit(1);
518 26a5f13b bellard
        }
519 26a5f13b bellard
    }
520 cbb608a5 Brad
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
521 cbb608a5 Brad
    || defined(__DragonFly__) || defined(__OpenBSD__)
522 06e67a82 aliguori
    {
523 06e67a82 aliguori
        int flags;
524 06e67a82 aliguori
        void *addr = NULL;
525 06e67a82 aliguori
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
526 06e67a82 aliguori
#if defined(__x86_64__)
527 06e67a82 aliguori
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
528 06e67a82 aliguori
         * 0x40000000 is free */
529 06e67a82 aliguori
        flags |= MAP_FIXED;
530 06e67a82 aliguori
        addr = (void *)0x40000000;
531 06e67a82 aliguori
        /* Cannot map more than that */
532 06e67a82 aliguori
        if (code_gen_buffer_size > (800 * 1024 * 1024))
533 06e67a82 aliguori
            code_gen_buffer_size = (800 * 1024 * 1024);
534 4cd31ad2 Blue Swirl
#elif defined(__sparc_v9__)
535 4cd31ad2 Blue Swirl
        // Map the buffer below 2G, so we can use direct calls and branches
536 4cd31ad2 Blue Swirl
        flags |= MAP_FIXED;
537 4cd31ad2 Blue Swirl
        addr = (void *) 0x60000000UL;
538 4cd31ad2 Blue Swirl
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
539 4cd31ad2 Blue Swirl
            code_gen_buffer_size = (512 * 1024 * 1024);
540 4cd31ad2 Blue Swirl
        }
541 06e67a82 aliguori
#endif
542 06e67a82 aliguori
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
543 06e67a82 aliguori
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
544 06e67a82 aliguori
                               flags, -1, 0);
545 06e67a82 aliguori
        if (code_gen_buffer == MAP_FAILED) {
546 06e67a82 aliguori
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
547 06e67a82 aliguori
            exit(1);
548 06e67a82 aliguori
        }
549 06e67a82 aliguori
    }
550 26a5f13b bellard
#else
551 26a5f13b bellard
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
552 26a5f13b bellard
    map_exec(code_gen_buffer, code_gen_buffer_size);
553 26a5f13b bellard
#endif
554 4369415f bellard
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
555 26a5f13b bellard
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
556 26a5f13b bellard
    code_gen_buffer_max_size = code_gen_buffer_size - 
557 239fda31 Aurelien Jarno
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
558 26a5f13b bellard
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
559 26a5f13b bellard
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
560 26a5f13b bellard
}
561 26a5f13b bellard
562 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
563 26a5f13b bellard
   (in bytes) allocated to the translation buffer. Zero means default
564 26a5f13b bellard
   size. */
565 26a5f13b bellard
void cpu_exec_init_all(unsigned long tb_size)
566 26a5f13b bellard
{
567 26a5f13b bellard
    cpu_gen_init();
568 26a5f13b bellard
    code_gen_alloc(tb_size);
569 26a5f13b bellard
    code_gen_ptr = code_gen_buffer;
570 4369415f bellard
    page_init();
571 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
572 26a5f13b bellard
    io_mem_init();
573 e2eef170 pbrook
#endif
574 9002ec79 Richard Henderson
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
575 9002ec79 Richard Henderson
    /* There's no guest base to take into account, so go ahead and
576 9002ec79 Richard Henderson
       initialize the prologue now.  */
577 9002ec79 Richard Henderson
    tcg_prologue_init(&tcg_ctx);
578 9002ec79 Richard Henderson
#endif
579 26a5f13b bellard
}
580 26a5f13b bellard
581 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
582 9656f324 pbrook
583 e59fb374 Juan Quintela
static int cpu_common_post_load(void *opaque, int version_id)
584 e7f4eff7 Juan Quintela
{
585 e7f4eff7 Juan Quintela
    CPUState *env = opaque;
586 9656f324 pbrook
587 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
588 3098dba0 aurel32
       version_id is increased. */
589 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
590 9656f324 pbrook
    tlb_flush(env, 1);
591 9656f324 pbrook
592 9656f324 pbrook
    return 0;
593 9656f324 pbrook
}
594 e7f4eff7 Juan Quintela
595 e7f4eff7 Juan Quintela
/* Migration description of the target-independent part of a CPU:
   only 'halted' and 'interrupt_request' travel in this section;
   cpu_common_post_load fixes them up after restore. */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
607 9656f324 pbrook
#endif
608 9656f324 pbrook
609 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
610 950f1472 Glauber Costa
{
611 950f1472 Glauber Costa
    CPUState *env = first_cpu;
612 950f1472 Glauber Costa
613 950f1472 Glauber Costa
    while (env) {
614 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
615 950f1472 Glauber Costa
            break;
616 950f1472 Glauber Costa
        env = env->next_cpu;
617 950f1472 Glauber Costa
    }
618 950f1472 Glauber Costa
619 950f1472 Glauber Costa
    return env;
620 950f1472 Glauber Costa
}
621 950f1472 Glauber Costa
622 6a00d601 bellard
/* Register a freshly created CPU: append it to the global CPU list,
   assign it the next free cpu_index, initialize its debug lists and,
   for system emulation, hook it into savevm/migration. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* In user mode other threads may walk/extend the CPU list
       concurrently; serialize against them. */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    /* Find the list tail; cpu_index ends up as the current list
       length, i.e. the next free index. */
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    /* Link the CPU in only after it is fully initialized. */
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Common state goes through vmstate; the target-specific part
       still uses the legacy cpu_save/cpu_load callbacks. */
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
654 fd6ce8f6 bellard
655 d1a1eb74 Tristan Gingold
/* Allocate a new translation block. Flush the translation buffer if
656 d1a1eb74 Tristan Gingold
   too many translation blocks or too much generated code. */
657 d1a1eb74 Tristan Gingold
static TranslationBlock *tb_alloc(target_ulong pc)
658 d1a1eb74 Tristan Gingold
{
659 d1a1eb74 Tristan Gingold
    TranslationBlock *tb;
660 d1a1eb74 Tristan Gingold
661 d1a1eb74 Tristan Gingold
    if (nb_tbs >= code_gen_max_blocks ||
662 d1a1eb74 Tristan Gingold
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
663 d1a1eb74 Tristan Gingold
        return NULL;
664 d1a1eb74 Tristan Gingold
    tb = &tbs[nb_tbs++];
665 d1a1eb74 Tristan Gingold
    tb->pc = pc;
666 d1a1eb74 Tristan Gingold
    tb->cflags = 0;
667 d1a1eb74 Tristan Gingold
    return tb;
668 d1a1eb74 Tristan Gingold
}
669 d1a1eb74 Tristan Gingold
670 d1a1eb74 Tristan Gingold
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs == 0 || tb != &tbs[nb_tbs - 1]) {
        return;
    }
    nb_tbs--;
    code_gen_ptr = tb->tc_ptr;
}
680 d1a1eb74 Tristan Gingold
681 9fa3e853 bellard
/* Drop the page's SMC code bitmap (if any) and reset the write
   counter that triggers its rebuild. */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
689 9fa3e853 bellard
690 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
691 5cd2c5b6 Richard Henderson
692 5cd2c5b6 Richard Henderson
static void page_flush_tb_1 (int level, void **lp)
693 fd6ce8f6 bellard
{
694 5cd2c5b6 Richard Henderson
    int i;
695 fd6ce8f6 bellard
696 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
697 5cd2c5b6 Richard Henderson
        return;
698 5cd2c5b6 Richard Henderson
    }
699 5cd2c5b6 Richard Henderson
    if (level == 0) {
700 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
701 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
702 5cd2c5b6 Richard Henderson
            pd[i].first_tb = NULL;
703 5cd2c5b6 Richard Henderson
            invalidate_page_bitmap(pd + i);
704 fd6ce8f6 bellard
        }
705 5cd2c5b6 Richard Henderson
    } else {
706 5cd2c5b6 Richard Henderson
        void **pp = *lp;
707 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
708 5cd2c5b6 Richard Henderson
            page_flush_tb_1 (level - 1, pp + i);
709 5cd2c5b6 Richard Henderson
        }
710 5cd2c5b6 Richard Henderson
    }
711 5cd2c5b6 Richard Henderson
}
712 5cd2c5b6 Richard Henderson
713 5cd2c5b6 Richard Henderson
static void page_flush_tb(void)
714 5cd2c5b6 Richard Henderson
{
715 5cd2c5b6 Richard Henderson
    int i;
716 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
717 5cd2c5b6 Richard Henderson
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
718 fd6ce8f6 bellard
    }
719 fd6ce8f6 bellard
}
720 fd6ce8f6 bellard
721 fd6ce8f6 bellard
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* Sanity check: the generator must never have written past the
       buffer end. */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* Invalidate every CPU's virtual-pc -> TB lookup cache. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* Empty the physical-pc hash table and detach all TBs from
       their pages. */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* Rewind the code buffer: all generated code is now reusable. */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
749 fd6ce8f6 bellard
750 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
751 fd6ce8f6 bellard
752 bc98a7ef j_mayer
/* Debug check: after invalidating a page, no TB overlapping that
   page should remain in the physical hash table. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int bucket;

    address &= TARGET_PAGE_MASK;
    for (bucket = 0; bucket < CODE_GEN_PHYS_HASH_SIZE; bucket++) {
        for (tb = tb_phys_hash[bucket]; tb != NULL;
             tb = tb->phys_hash_next) {
            /* overlap test: [address, address+PAGE) vs [pc, pc+size) */
            if (address + TARGET_PAGE_SIZE > tb->pc &&
                address < tb->pc + tb->size) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
768 fd6ce8f6 bellard
769 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
770 fd6ce8f6 bellard
static void tb_page_check(void)
771 fd6ce8f6 bellard
{
772 fd6ce8f6 bellard
    TranslationBlock *tb;
773 fd6ce8f6 bellard
    int i, flags1, flags2;
774 3b46e624 ths
775 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
776 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
777 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
778 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
779 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
780 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
781 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
782 fd6ce8f6 bellard
            }
783 fd6ce8f6 bellard
        }
784 fd6ce8f6 bellard
    }
785 fd6ce8f6 bellard
}
786 fd6ce8f6 bellard
787 fd6ce8f6 bellard
#endif
788 fd6ce8f6 bellard
789 fd6ce8f6 bellard
/* invalidate one TB */
790 fd6ce8f6 bellard
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
791 fd6ce8f6 bellard
                             int next_offset)
792 fd6ce8f6 bellard
{
793 fd6ce8f6 bellard
    TranslationBlock *tb1;
794 fd6ce8f6 bellard
    for(;;) {
795 fd6ce8f6 bellard
        tb1 = *ptb;
796 fd6ce8f6 bellard
        if (tb1 == tb) {
797 fd6ce8f6 bellard
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
798 fd6ce8f6 bellard
            break;
799 fd6ce8f6 bellard
        }
800 fd6ce8f6 bellard
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
801 fd6ce8f6 bellard
    }
802 fd6ce8f6 bellard
}
803 fd6ce8f6 bellard
804 9fa3e853 bellard
/* Unlink 'tb' from a per-page TB list. List pointers carry the page
   slot (0 or 1) in their two low bits, so each node must be untagged
   before it can be followed or compared. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tagged, *cur;
    unsigned int slot;

    for (;;) {
        tagged = *ptb;
        slot = (long)tagged & 3;
        cur = (TranslationBlock *)((long)tagged & ~3);
        if (cur == tb) {
            /* splice the node out of the list */
            *ptb = cur->page_next[slot];
            return;
        }
        ptb = &cur->page_next[slot];
    }
}
820 9fa3e853 bellard
821 d4e8164f bellard
/* Remove tb's jump slot 'n' from the circular list of TBs jumping
   into the same target. List pointers are tagged: the two low bits
   hold the jump-slot number, and tag value 2 marks the list head
   (jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;   /* low bits = jump-slot tag */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: follow the head pointer of the target TB */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
848 d4e8164f bellard
849 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
850 d4e8164f bellard
   another TB */
851 d4e8164f bellard
static inline void tb_reset_jump(TranslationBlock *tb, int n)
852 d4e8164f bellard
{
853 d4e8164f bellard
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
854 d4e8164f bellard
}
855 d4e8164f bellard
856 41c1b1c9 Paul Brook
/* Remove one TB from every data structure that references it: the
   physical hash table, the per-page TB lists (except the page given
   by 'page_addr', whose list the caller is already iterating), the
   per-CPU jump caches and the chained-jump lists. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* second page only exists when the TB spans a page boundary */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from every CPU's virtual-pc jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;   /* tag 2 terminates the circular list */
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
911 9fa3e853 bellard
912 9fa3e853 bellard
/* Set bits [start, start+len) in the bitmap 'tab' (LSB-first within
   each byte). A zero length leaves the bitmap untouched. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    int end = start + len;

    for (bit = start; bit < end; bit++) {
        tab[bit >> 3] |= (uint8_t)(1u << (bit & 7));
    }
}
938 9fa3e853 bellard
939 9fa3e853 bellard
/* Build the page's SMC code bitmap: one bit per byte of the page,
   set wherever translated code exists. Used to filter writes in
   tb_invalidate_phys_page_fast(). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per page byte, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low two bits of the list pointer encode which of the TB's
           pages (0 or 1) this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page: the TB occupies the page's head */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
966 9fa3e853 bellard
967 2e70f6ef pbrook
/* Translate the guest code at (pc, cs_base, flags) into a new TB,
   link it into the physical page structures and return it. If the
   code buffer or TB array is full, everything is flushed first. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer past the generated code, rounded up
       to the generator's alignment */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills onto a second page; record it as well */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
1004 3b46e624 ths
1005 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on this page, build the bitmap so
       later small writes can be filtered cheaply */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low two bits of the list pointer select which of the TB's
           pages this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            /* lazily locate the TB the CPU is currently executing */
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* re-deliver any interrupt that arrived while
                   current_tb was cleared */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1115 fd6ce8f6 bellard
1116 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1117 41c1b1c9 Paul Brook
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1118 9fa3e853 bellard
{
1119 9fa3e853 bellard
    PageDesc *p;
1120 9fa3e853 bellard
    int offset, b;
1121 59817ccb bellard
#if 0
1122 a4193c8a bellard
    if (1) {
1123 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1124 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1125 93fcfe39 aliguori
                  cpu_single_env->eip,
1126 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1127 59817ccb bellard
    }
1128 59817ccb bellard
#endif
1129 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1130 5fafdf24 ths
    if (!p)
1131 9fa3e853 bellard
        return;
1132 9fa3e853 bellard
    if (p->code_bitmap) {
1133 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1134 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1135 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1136 9fa3e853 bellard
            goto do_invalidate;
1137 9fa3e853 bellard
    } else {
1138 9fa3e853 bellard
    do_invalidate:
1139 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1140 9fa3e853 bellard
    }
1141 9fa3e853 bellard
}
1142 9fa3e853 bellard
1143 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1144 41c1b1c9 Paul Brook
/* Invalidate every TB on the page containing 'addr'. 'pc'/'puc' come
   from the host signal frame of the faulting write; when precise SMC
   is available they are used to detect self-modification of the
   currently executing TB and to resume it safely. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* map the faulting host pc back to the TB being executed */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low two bits of the list pointer select which of the TB's
           pages this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
1202 9fa3e853 bellard
#endif
1203 fd6ce8f6 bellard
1204 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    /* Record which guest page half 'n' (0 or 1) of the TB lives in and
       prepend the TB to that page's TB list.  The low 2 bits of the list
       pointer encode 'n' so the list can be walked for either half. */
    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;   /* non-NULL => page already held code */
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may span several target pages; accumulate the
           combined protection before dropping PAGE_WRITE on all of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1257 fd6ce8f6 bellard
1258 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag bit 2 marks this entry as the head of the circular
       jmp_first list; no outgoing chained jumps yet */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1297 fd6ce8f6 bellard
1298 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    /* reject host addresses outside the generated-code buffer */
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    /* no exact match: tc_ptr falls inside the code of tbs[m_max],
       the last TB whose tc_ptr is below it */
    return &tbs[m_max];
}
1328 7501267e bellard
1329 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1330 ea041c0e bellard
1331 ea041c0e bellard
/* Unchain the n-th outgoing jump of 'tb' (if chained) and recursively
   unchain the TB it pointed to.  jmp_next/jmp_first pointers carry a
   2-bit tag in their low bits: 0/1 = which jump slot of the pointed-to
   TB, 2 = head of the circular list. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1369 ea041c0e bellard
1370 ea041c0e bellard
/* Unchain both outgoing jumps of 'tb' (a TB has at most two). */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
1375 ea041c0e bellard
1376 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1377 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1378 94df27fd Paul Brook
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1379 94df27fd Paul Brook
{
1380 94df27fd Paul Brook
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1381 94df27fd Paul Brook
}
1382 94df27fd Paul Brook
#else
1383 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1384 d720b93d bellard
{
1385 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1386 9b3c35e0 j_mayer
    target_ulong pd;
1387 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1388 c2f07f81 pbrook
    PhysPageDesc *p;
1389 d720b93d bellard
1390 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1391 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1392 c2f07f81 pbrook
    if (!p) {
1393 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1394 c2f07f81 pbrook
    } else {
1395 c2f07f81 pbrook
        pd = p->phys_offset;
1396 c2f07f81 pbrook
    }
1397 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1398 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1399 d720b93d bellard
}
1400 c27004ec bellard
#endif
1401 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1402 d720b93d bellard
1403 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
/* Watchpoints are not supported in user-mode emulation: removal is a
   no-op and insertion reports ENOSYS, keeping the interface identical
   to the system-mode implementations below. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
1415 6658ffb8 pbrook
/* Add a watchpoint.  'len' must be a power of two (1/2/4/8) and 'addr'
   aligned to it.  On success the new watchpoint is optionally returned
   through 'watchpoint'; returns 0 or -EINVAL. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* force a TLB refill so the slow path sees the new watchpoint */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1446 6658ffb8 pbrook
1447 a1d1bb31 aliguori
/* Remove a specific watchpoint.  Matches on address, length and flags
   (ignoring the transient BP_WATCHPOINT_HIT bit); returns 0 or -ENOENT. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
1463 6658ffb8 pbrook
1464 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  Unlinks, flushes the TLB
   entry for its address, and frees the watchpoint (caller's pointer
   becomes dangling). */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
1473 a1d1bb31 aliguori
1474 a1d1bb31 aliguori
/* Remove all matching watchpoints.  'mask' selects by flag bits
   (e.g. BP_GDB); the SAFE iterator allows removal while walking. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
1485 7d03f82f edgar_igl
1486 a1d1bb31 aliguori
/* Add a breakpoint.  On targets without in-circuit-emulation support
   this returns -ENOSYS; otherwise the new breakpoint is optionally
   returned through 'breakpoint' and 0 is returned. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* retranslate the TB containing pc so it checks the breakpoint */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
1513 4c3a88a2 bellard
1514 a1d1bb31 aliguori
/* Remove a specific breakpoint.  Matches on pc and exact flags;
   returns 0, -ENOENT if not found, or -ENOSYS without ICE support. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
1531 7d03f82f edgar_igl
1532 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  Unlinks, invalidates the
   TB containing its pc, and frees the breakpoint (caller's pointer
   becomes dangling). */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1543 a1d1bb31 aliguori
1544 a1d1bb31 aliguori
/* Remove all matching breakpoints.  'mask' selects by flag bits
   (e.g. BP_GDB); the SAFE iterator allows removal while walking. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
1556 4c3a88a2 bellard
1557 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            /* under KVM single-stepping is done by the kernel, not TCG */
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1574 c33a346e bellard
1575 34865134 bellard
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* lazily open the log file the first time logging is enabled;
       append if a previous session already wrote to it */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    /* disabling all log flags closes the file */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1602 34865134 bellard
1603 34865134 bellard
/* Switch logging to a new file name; any open log file is closed and
   reopened by cpu_set_log() with the current log level.
   NOTE(review): the previous strdup'd name is leaked here — the initial
   value may be a static default string, so it cannot be blindly freed;
   verify before changing. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
1612 c33a346e bellard
1613 3098dba0 aurel32
/* Force the CPU out of chained TB execution so it returns to the main
   loop (e.g. to service an interrupt or exit request). */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1632 3098dba0 aurel32
1633 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* make the instruction counter expire immediately so the CPU
           loop notices the pending interrupt */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* newly raised interrupts outside an I/O instruction would make
           icount non-deterministic */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1664 ea041c0e bellard
1665 b54ad049 bellard
/* Clear the given pending interrupt bits. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1669 b54ad049 bellard
1670 3098dba0 aurel32
/* Request that the CPU leave its execution loop as soon as possible. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1675 3098dba0 aurel32
1676 c7cd6a37 blueswir1
/* Table of recognized "-d" log categories: mask bit, option name, and
   help text.  Terminated by a zero-mask sentinel entry; scanned by
   cpu_str_to_log_mask(). */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1707 f193c797 bellard
1708 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1709 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1710 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1711 f6f3fbca Michael S. Tsirkin
1712 f6f3fbca Michael S. Tsirkin
/* Notify every registered physical-memory client of a mapping change. */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}
1721 f6f3fbca Michael S. Tsirkin
1722 f6f3fbca Michael S. Tsirkin
/* Ask every client to sync its dirty bitmap for [start, end).
   Stops at the first failure and returns that client's error code;
   returns 0 if all succeed. */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}
1733 f6f3fbca Michael S. Tsirkin
1734 f6f3fbca Michael S. Tsirkin
/* Toggle migration logging on every client.  Stops at the first
   failure and returns that client's error code; 0 if all succeed. */
static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
1744 f6f3fbca Michael S. Tsirkin
1745 5cd2c5b6 Richard Henderson
/* Recursively walk one subtree of the multi-level physical page table.
   At level 0 the node is an array of PhysPageDesc; every mapped page
   (phys_offset != IO_MEM_UNASSIGNED) is reported to the client.  At
   higher levels the node is an array of child pointers. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;   /* empty subtree */
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1768 f6f3fbca Michael S. Tsirkin
1769 f6f3fbca Michael S. Tsirkin
/* Walk the whole physical page table and report every mapped page to
   'client' via phys_page_for_each_1().
   Fix: the loop previously passed "l1_phys_map + 1" on every iteration,
   so the same top-level slot was visited P_L1_SIZE times and all other
   slots (including slot 0) were skipped; index with 'i' instead. */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        /* each L1 entry roots a subtree of depth P_L1_SHIFT/L2_BITS - 1 */
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}
1777 f6f3fbca Michael S. Tsirkin
1778 f6f3fbca Michael S. Tsirkin
/* Register a new physical-memory client and immediately replay the
   current memory map to it so it starts fully synchronized. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1783 f6f3fbca Michael S. Tsirkin
1784 f6f3fbca Michael S. Tsirkin
/* Unregister a physical-memory client; it receives no further
   notifications.  The client's storage is owned by the caller. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
1789 f6f3fbca Michael S. Tsirkin
1790 f193c797 bellard
/* Return 1 if the first n bytes of s1 exactly spell out s2, else 0.
   Used to match a length-delimited token against a NUL-terminated name. */
static int cmp1(const char *s1, int n, const char *s2)
{
    size_t want = strlen(s2);

    if (want != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, want) == 0;
}
1796 3b46e624 ths
1797 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* each token runs from p up to the next ',' or end of string */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            /* "all" enables every category */
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;   /* unknown token: whole string is invalid */
        }
    found:
        /* after the "all" branch, item is the zero-mask sentinel, so
           this OR is a harmless no-op */
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
1829 ea041c0e bellard
1830 7501267e bellard
/* Report a fatal emulation error to stderr (and the log file, if
   enabled), dump CPU state, and abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;   /* second copy: ap is consumed by the stderr vfprintf */

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT handler so abort() really kills
           the process instead of re-entering a guest signal handler */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1869 7501267e bellard
1870 c5be9f08 ths
/* Create a copy of CPU state 'env' (used e.g. on fork in user mode).
   Chain pointers and the cpu index of the new CPU are preserved, and
   the source CPU's break/watchpoints are cloned into the copy.
   BUG FIX: the original code called QTAILQ_INIT on *env*'s lists after
   the memcpy, which emptied the source CPU's break/watchpoint lists
   (leaking the nodes) and made the QTAILQ_FOREACH below iterate an
   empty list, so nothing was ever cloned.  The new CPU's list heads
   are the ones that must be re-initialized: after memcpy they alias
   the source CPU's queue nodes. */
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1903 c5be9f08 ths
1904 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1905 0124311e bellard
1906 5c751e99 edgar_igl
/* Discard jump cache entries for any tb which might potentially
   overlap the flushed page.  The hash bucket for the preceding page
   is cleared as well, since a tb starting there may extend into the
   flushed page. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int idx;

    idx = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[idx], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    idx = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[idx], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1920 5c751e99 edgar_igl
1921 08738984 Igor Kovalenko
/* A TLB entry with every field invalidated; assigned wholesale to wipe
   entries in tlb_flush()/tlb_flush_entry(). */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1927 08738984 Igor Kovalenko
1928 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1929 ee8b7021 bellard
   implemented yet) */
1930 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1931 33417e70 bellard
{
1932 33417e70 bellard
    int i;
1933 0124311e bellard
1934 9fa3e853 bellard
#if defined(DEBUG_TLB)
1935 9fa3e853 bellard
    printf("tlb_flush:\n");
1936 9fa3e853 bellard
#endif
1937 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1938 0124311e bellard
       links while we are modifying them */
1939 0124311e bellard
    env->current_tb = NULL;
1940 0124311e bellard
1941 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1942 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1943 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1944 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1945 cfde4bd9 Isaku Yamahata
        }
1946 33417e70 bellard
    }
1947 9fa3e853 bellard
1948 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1949 9fa3e853 bellard
1950 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1951 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1952 e3db7226 bellard
    tlb_flush_count++;
1953 33417e70 bellard
}
1954 33417e70 bellard
1955 274da6b2 bellard
/* Invalidate 'tlb_entry' if any of its read/write/code addresses
   matches the (page-aligned) address 'addr'. */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    const target_ulong mask = TARGET_PAGE_MASK | TLB_INVALID_MASK;

    if (addr == (tlb_entry->addr_read & mask) ||
        addr == (tlb_entry->addr_write & mask) ||
        addr == (tlb_entry->addr_code & mask)) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1966 61382a50 bellard
1967 2e12669a bellard
/* Flush the TLB entries for one virtual page, in every MMU mode. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int slot;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    slot = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][slot], addr);
    }

    tlb_flush_jmp_cache(env, addr);
}
1996 9fa3e853 bellard
1997 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
1998 9fa3e853 bellard
   can be detected */
1999 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
2000 9fa3e853 bellard
{
2001 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
2002 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
2003 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
2004 9fa3e853 bellard
}
2005 9fa3e853 bellard
2006 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* NOTE(review): env and vaddr are unused here; only the dirty flag for
   the ram page is set. */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2013 1ccde1cb bellard
2014 5fafdf24 ths
/* If 'tlb_entry' maps writable RAM inside [start, start+length), mark
   it TLB_NOTDIRTY so the next write goes through the slow path and
   sets the dirty bit again. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long va;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    va = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
    if (va - start < length) {
        tlb_entry->addr_write =
            (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
    }
}
2025 1ccde1cb bellard
2026 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
2027 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2028 0a962c02 bellard
                                     int dirty_flags)
2029 1ccde1cb bellard
{
2030 1ccde1cb bellard
    CPUState *env;
2031 4f2ac237 bellard
    unsigned long length, start1;
2032 f7c11b53 Yoshiaki Tamura
    int i;
2033 1ccde1cb bellard
2034 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
2035 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
2036 1ccde1cb bellard
2037 1ccde1cb bellard
    length = end - start;
2038 1ccde1cb bellard
    if (length == 0)
2039 1ccde1cb bellard
        return;
2040 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2041 f23db169 bellard
2042 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
2043 1ccde1cb bellard
       when accessing the range */
2044 b2e0a138 Michael S. Tsirkin
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
2045 5579c7f3 pbrook
    /* Chek that we don't span multiple blocks - this breaks the
2046 5579c7f3 pbrook
       address comparisons below.  */
2047 b2e0a138 Michael S. Tsirkin
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2048 5579c7f3 pbrook
            != (end - 1) - start) {
2049 5579c7f3 pbrook
        abort();
2050 5579c7f3 pbrook
    }
2051 5579c7f3 pbrook
2052 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2053 cfde4bd9 Isaku Yamahata
        int mmu_idx;
2054 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2055 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
2056 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2057 cfde4bd9 Isaku Yamahata
                                      start1, length);
2058 cfde4bd9 Isaku Yamahata
        }
2059 6a00d601 bellard
    }
2060 1ccde1cb bellard
}
2061 1ccde1cb bellard
2062 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2063 74576198 aliguori
{
2064 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2065 74576198 aliguori
    in_migration = enable;
2066 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2067 f6f3fbca Michael S. Tsirkin
    return ret;
2068 74576198 aliguori
}
2069 74576198 aliguori
2070 74576198 aliguori
/* Return nonzero if dirty-page tracking is currently enabled
   (the value last stored by cpu_physical_memory_set_dirty_tracking). */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2074 74576198 aliguori
2075 c227f099 Anthony Liguori
/* Ask registered memory clients to synchronize the dirty bitmap for
   the physical range [start_addr, end_addr]. */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    return cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
}
2083 2bec46dc aliguori
2084 e5896b12 Anthony PERARD
int cpu_physical_log_start(target_phys_addr_t start_addr,
2085 e5896b12 Anthony PERARD
                           ram_addr_t size)
2086 e5896b12 Anthony PERARD
{
2087 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2088 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2089 e5896b12 Anthony PERARD
        if (client->log_start) {
2090 e5896b12 Anthony PERARD
            int r = client->log_start(client, start_addr, size);
2091 e5896b12 Anthony PERARD
            if (r < 0) {
2092 e5896b12 Anthony PERARD
                return r;
2093 e5896b12 Anthony PERARD
            }
2094 e5896b12 Anthony PERARD
        }
2095 e5896b12 Anthony PERARD
    }
2096 e5896b12 Anthony PERARD
    return 0;
2097 e5896b12 Anthony PERARD
}
2098 e5896b12 Anthony PERARD
2099 e5896b12 Anthony PERARD
int cpu_physical_log_stop(target_phys_addr_t start_addr,
2100 e5896b12 Anthony PERARD
                          ram_addr_t size)
2101 e5896b12 Anthony PERARD
{
2102 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2103 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2104 e5896b12 Anthony PERARD
        if (client->log_stop) {
2105 e5896b12 Anthony PERARD
            int r = client->log_stop(client, start_addr, size);
2106 e5896b12 Anthony PERARD
            if (r < 0) {
2107 e5896b12 Anthony PERARD
                return r;
2108 e5896b12 Anthony PERARD
            }
2109 e5896b12 Anthony PERARD
        }
2110 e5896b12 Anthony PERARD
    }
2111 e5896b12 Anthony PERARD
    return 0;
2112 e5896b12 Anthony PERARD
}
2113 e5896b12 Anthony PERARD
2114 3a7d929e bellard
/* Re-check the dirty state of the ram page behind a writable RAM TLB
   entry and set TLB_NOTDIRTY if the page is no longer dirty. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
        + tlb_entry->addend);
    ram_addr = qemu_ram_addr_from_host_nofail(p);
    if (!cpu_physical_memory_is_dirty(ram_addr)) {
        tlb_entry->addr_write |= TLB_NOTDIRTY;
    }
}
2128 3a7d929e bellard
2129 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2130 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2131 3a7d929e bellard
{
2132 3a7d929e bellard
    int i;
2133 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2134 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2135 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2136 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2137 cfde4bd9 Isaku Yamahata
    }
2138 3a7d929e bellard
}
2139 3a7d929e bellard
2140 0f459d16 pbrook
/* Clear TLB_NOTDIRTY on a single entry if it maps 'vaddr'. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
2145 1ccde1cb bellard
2146 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2147 0f459d16 pbrook
   so that it is no longer dirty */
2148 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2149 1ccde1cb bellard
{
2150 1ccde1cb bellard
    int i;
2151 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2152 1ccde1cb bellard
2153 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2154 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2155 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2156 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2157 9fa3e853 bellard
}
2158 9fa3e853 bellard
2159 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
/* NOTE(review): mask = ~(size - 1) assumes 'size' is a power of two —
   confirm callers guarantee this. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    /* No large-page region recorded yet: start one for this page. */
    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    /* Widen the mask until the recorded region and the new page fall
       under one common prefix. */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2181 d4c430a8 Paul Brook
2182 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;           /* phys offset + flags of the target page */
    unsigned int index;
    target_ulong address;       /* virtual address stored in the entry */
    target_ulong code_address;
    unsigned long addend;       /* host address delta for RAM accesses */
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;   /* value for the parallel iotlb table */

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Record the large page so tlb_flush_page can force a full flush. */
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    /* addend converts a guest virtual address to a host pointer. */
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: force writes through the slow path so the
               dirty bit gets set. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2287 9fa3e853 bellard
2288 0124311e bellard
#else
2289 0124311e bellard
2290 ee8b7021 bellard
/* User-mode (CONFIG_USER_ONLY) stub: there is no softmmu TLB to flush. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2293 0124311e bellard
2294 2e12669a bellard
/* User-mode (CONFIG_USER_ONLY) stub: there is no softmmu TLB to flush. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2297 0124311e bellard
2298 edf8e2af Mika Westerberg
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator for the walk: consecutive pages with identical protection
   are merged into one region before 'fn' is invoked
   (see walk_memory_regions_end). */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked per merged region */
    void *priv;                 /* opaque argument passed through to fn */
    unsigned long start;        /* start of pending region, -1ul if none */
    int prot;                   /* protection flags of pending region */
};
2310 5cd2c5b6 Richard Henderson
2311 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2312 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2313 5cd2c5b6 Richard Henderson
{
2314 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2315 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2316 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2317 5cd2c5b6 Richard Henderson
            return rc;
2318 5cd2c5b6 Richard Henderson
        }
2319 5cd2c5b6 Richard Henderson
    }
2320 5cd2c5b6 Richard Henderson
2321 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2322 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2323 5cd2c5b6 Richard Henderson
2324 5cd2c5b6 Richard Henderson
    return 0;
2325 5cd2c5b6 Richard Henderson
}
2326 5cd2c5b6 Richard Henderson
2327 5cd2c5b6 Richard Henderson
/* Recursive helper for walk_memory_regions: walk one subtree of the
   page-table radix tree rooted at *lp, covering addresses starting at
   'base'.  'level' 0 means *lp is a leaf PageDesc array; otherwise it
   is an array of child pointers.  Returns the first nonzero callback
   result, else 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Empty subtree: close any pending region at 'base'. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: emit the accumulated region. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* Each child at this level spans L2_BITS * level extra bits. */
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2364 5cd2c5b6 Richard Henderson
2365 5cd2c5b6 Richard Henderson
/* Walk every mapped guest memory region and invoke 'fn' for each run
   of pages with identical protection.  Returns the first nonzero
   callback result, else 0. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data = {
        .fn = fn,
        .priv = priv,
        .start = -1ul,
        .prot = 0,
    };
    unsigned long i;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final pending region. */
    return walk_memory_regions_end(&data, 0, 0);
}
2385 edf8e2af Mika Westerberg
2386 b480d9b7 Paul Brook
/* walk_memory_regions callback: print one region line to the FILE
   passed via 'priv'. */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;
    char r = (prot & PAGE_READ) ? 'r' : '-';
    char w = (prot & PAGE_WRITE) ? 'w' : '-';
    char x = (prot & PAGE_EXEC) ? 'x' : '-';

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start, r, w, x);

    return 0;
}
2400 edf8e2af Mika Westerberg
2401 edf8e2af Mika Westerberg
/* dump memory mappings */
/* Print a header line, then one line per region via dump_region. */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
2408 33417e70 bellard
2409 53a5960a pbrook
/* Return the page flags for the page containing 'address', or 0 when
   no page descriptor exists for it. */
int page_get_flags(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);

    return p ? p->flags : 0;
}
2418 9fa3e853 bellard
2419 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
2420 376a7909 Richard Henderson
   The flag PAGE_WRITE_ORG is positioned automatically depending
2421 376a7909 Richard Henderson
   on PAGE_WRITE.  The mmap_lock should already be held.  */
2422 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2423 9fa3e853 bellard
{
2424 376a7909 Richard Henderson
    target_ulong addr, len;
2425 376a7909 Richard Henderson
2426 376a7909 Richard Henderson
    /* This function should never be called with addresses outside the
2427 376a7909 Richard Henderson
       guest address space.  If this assert fires, it probably indicates
2428 376a7909 Richard Henderson
       a missing call to h2g_valid.  */
2429 b480d9b7 Paul Brook
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2430 b480d9b7 Paul Brook
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2431 376a7909 Richard Henderson
#endif
2432 376a7909 Richard Henderson
    assert(start < end);
2433 9fa3e853 bellard
2434 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2435 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2436 376a7909 Richard Henderson
2437 376a7909 Richard Henderson
    if (flags & PAGE_WRITE) {
2438 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2439 376a7909 Richard Henderson
    }
2440 376a7909 Richard Henderson
2441 376a7909 Richard Henderson
    for (addr = start, len = end - start;
2442 376a7909 Richard Henderson
         len != 0;
2443 376a7909 Richard Henderson
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2444 376a7909 Richard Henderson
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2445 376a7909 Richard Henderson
2446 376a7909 Richard Henderson
        /* If the write protection bit is set, then we invalidate
2447 376a7909 Richard Henderson
           the code inside.  */
2448 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2449 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2450 9fa3e853 bellard
            p->first_tb) {
2451 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2452 9fa3e853 bellard
        }
2453 9fa3e853 bellard
        p->flags = flags;
2454 9fa3e853 bellard
    }
2455 33417e70 bellard
}
2456 33417e70 bellard
2457 3d97b40b ths
/* Verify that every page in [start, start+len) is valid and grants the
   permissions requested in FLAGS.  Pages that were write-protected only
   because they contain translated code are unprotected on demand.
   Returns 0 on success, -1 on any failure (invalid page, missing
   permission, or address wraparound).  */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* BUG FIX: the original returned 0 here, so only the first
               page of a multi-page range had its write permission
               checked.  Fall through and keep checking every page.  */
        }
    }
    return 0;
}
2506 3d97b40b ths
2507 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    /* Unknown page: the fault cannot be ours to handle.  */
    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        /* One host page may span several target pages: restore
           PAGE_WRITE on each of them and accumulate the combined
           protection bits for the final mprotect() call.  */
        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2554 9fa3e853 bellard
2555 6a00d601 bellard
/* No-op in user-only emulation: there is no softmmu TLB to update
   (a real implementation exists in the !CONFIG_USER_ONLY build).  */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2559 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2560 9fa3e853 bellard
2561 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2562 8da3ff18 pbrook
2563 c04b2b78 Paul Brook
/* Byte offset of ADDR within its target page.  */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Descriptor for a physical page whose contents are split across
   several I/O regions: one io-index / region-offset pair per byte
   offset within the page (indexed with SUBPAGE_IDX).  */
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
/* Intersect [start_addr, start_addr + orig_size) with the page holding
   ADDR: START_ADDR2/END_ADDR2 receive the first and last byte offsets
   of the intersection within that page, and NEED_SUBPAGE is set to 1
   when the intersection does not cover the whole page.  Note that
   ORIG_SIZE is taken from the enclosing scope and the END_ADDR
   parameter is unused by the expansion.  */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2594 db7b5426 blueswir1
2595 8f2498f9 Michael S. Tsirkin
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;  /* read by the CHECK_SUBPAGE macro below */
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    /* Round the size up to whole target pages.  */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* The page is already mapped: either overwrite it whole, or
               split it into a subpage when the new range covers only
               part of it.  */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage: reuse its descriptor.  */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* For RAM/ROM(D) mappings each successive target page is
                   backed by the next page of the region.  */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* No mapping yet: allocate a fresh descriptor.  */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2687 33417e70 bellard
2688 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2689 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2690 ba863458 bellard
{
2691 ba863458 bellard
    PhysPageDesc *p;
2692 ba863458 bellard
2693 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2694 ba863458 bellard
    if (!p)
2695 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2696 ba863458 bellard
    return p->phys_offset;
2697 ba863458 bellard
}
2698 ba863458 bellard
2699 c227f099 Anthony Liguori
/* Tell KVM (when enabled) that [addr, addr+size) is coalesced MMIO.  */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_coalesce_mmio_region(addr, size);
}
2704 f65ed4c1 aliguori
2705 c227f099 Anthony Liguori
/* Tell KVM (when enabled) that [addr, addr+size) is no longer
   coalesced MMIO.  */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_uncoalesce_mmio_region(addr, size);
}
2710 f65ed4c1 aliguori
2711 62a2744c Sheng Yang
/* Drain any coalesced MMIO writes buffered by KVM (no-op otherwise).  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2716 62a2744c Sheng Yang
2717 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2718 c902760f Marcelo Tosatti
2719 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2720 c902760f Marcelo Tosatti
2721 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6

/* Return the filesystem block size for PATH — the huge page size when
   PATH is on a hugetlbfs mount — or 0 if statfs() fails.  A warning is
   printed when PATH is not on hugetlbfs.  */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    /* Retry statfs() when it is interrupted by a signal.  */
    for (;;) {
        ret = statfs(path, &fs);
        if (ret == 0 || errno != EINTR) {
            break;
        }
    }

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
2742 c902760f Marcelo Tosatti
2743 04b16653 Alex Williamson
/* Back a RAM block with an mmap'ed temporary file under PATH (intended
   for hugetlbfs mounts).  Returns the mapped area and stores the open
   file descriptor in block->fd, or returns NULL on any failure so the
   caller can fall back to normal allocation.  */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* A block smaller than one huge page gains nothing here.  */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the mapping keeps the storage alive, and the
       file disappears automatically once fd is closed.  */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages.  */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    /* Keep the fd open for the lifetime of the block; qemu_ram_free()
       closes it.  */
    block->fd = fd;
    return area;
}
2811 c902760f Marcelo Tosatti
#endif
2812 c902760f Marcelo Tosatti
2813 d17b5288 Alex Williamson
/* Choose an offset in ram_addr_t space for a new block of SIZE bytes,
   preferring the smallest existing gap between registered RAM blocks
   that can hold it (best-fit).  Returns 0 when the list is empty.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *blk, *probe;
    ram_addr_t best_offset = 0, best_gap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH(blk, &ram_list.blocks, next) {
        ram_addr_t gap_start = blk->offset + blk->length;
        ram_addr_t gap_end = ULONG_MAX;

        /* The gap after BLK ends at the closest block that starts at or
           beyond GAP_START (or extends to the top of the space).  */
        QLIST_FOREACH(probe, &ram_list.blocks, next) {
            if (probe->offset >= gap_start) {
                gap_end = MIN(gap_end, probe->offset);
            }
        }
        if (gap_end - gap_start >= size && gap_end - gap_start < best_gap) {
            best_offset = gap_start;
            best_gap = gap_end - gap_start;
        }
    }
    return best_offset;
}
2838 04b16653 Alex Williamson
2839 04b16653 Alex Williamson
static ram_addr_t last_ram_offset(void)
2840 04b16653 Alex Williamson
{
2841 d17b5288 Alex Williamson
    RAMBlock *block;
2842 d17b5288 Alex Williamson
    ram_addr_t last = 0;
2843 d17b5288 Alex Williamson
2844 d17b5288 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next)
2845 d17b5288 Alex Williamson
        last = MAX(last, block->offset + block->length);
2846 d17b5288 Alex Williamson
2847 d17b5288 Alex Williamson
    return last;
2848 d17b5288 Alex Williamson
}
2849 d17b5288 Alex Williamson
2850 84b89d78 Cam Macdonell
/* Register a new RAM block of SIZE bytes and return its ram_addr_t
   offset.  If HOST is non-NULL the caller supplies the backing memory
   (marked RAM_PREALLOC_MASK so it is never freed here); otherwise the
   memory is allocated, honouring -mem-path when set.  Aborts if a
   block with the same id string already exists.  */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Build the block id as "<device path>/<name>" when the device can
       provide a bus path, else just "<name>".  */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Block ids must be unique; duplicates are a programming error.  */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            /* Try file-backed (hugetlbfs) allocation first, falling
               back to an anonymous allocation on failure.  */
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark every page of the new block dirty.  */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2918 e9a1ab19 bellard
2919 6977dfe6 Yoshiaki Tamura
/* Allocate a new RAM block of SIZE bytes with QEMU-provided backing
   memory; convenience wrapper for qemu_ram_alloc_from_ptr(host=NULL).  */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    void *no_host = NULL;

    return qemu_ram_alloc_from_ptr(dev, name, size, no_host);
}
2923 6977dfe6 Yoshiaki Tamura
2924 c227f099 Anthony Liguori
/* Unregister the RAM block whose offset is ADDR and release its backing
   memory with the mechanism matching how it was allocated (caller-owned
   preallocated memory is left untouched).  Silently does nothing when
   no block with that offset exists.  */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Host memory was supplied by the caller; not ours to free.  */
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                /* block->fd != 0 means file_ram_alloc() succeeded.  */
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }

}
2957 e9a1ab19 bellard
2958 cd19cfa2 Huang Ying
#ifndef _WIN32
2959 cd19cfa2 Huang Ying
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2960 cd19cfa2 Huang Ying
{
2961 cd19cfa2 Huang Ying
    RAMBlock *block;
2962 cd19cfa2 Huang Ying
    ram_addr_t offset;
2963 cd19cfa2 Huang Ying
    int flags;
2964 cd19cfa2 Huang Ying
    void *area, *vaddr;
2965 cd19cfa2 Huang Ying
2966 cd19cfa2 Huang Ying
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2967 cd19cfa2 Huang Ying
        offset = addr - block->offset;
2968 cd19cfa2 Huang Ying
        if (offset < block->length) {
2969 cd19cfa2 Huang Ying
            vaddr = block->host + offset;
2970 cd19cfa2 Huang Ying
            if (block->flags & RAM_PREALLOC_MASK) {
2971 cd19cfa2 Huang Ying
                ;
2972 cd19cfa2 Huang Ying
            } else {
2973 cd19cfa2 Huang Ying
                flags = MAP_FIXED;
2974 cd19cfa2 Huang Ying
                munmap(vaddr, length);
2975 cd19cfa2 Huang Ying
                if (mem_path) {
2976 cd19cfa2 Huang Ying
#if defined(__linux__) && !defined(TARGET_S390X)
2977 cd19cfa2 Huang Ying
                    if (block->fd) {
2978 cd19cfa2 Huang Ying
#ifdef MAP_POPULATE
2979 cd19cfa2 Huang Ying
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2980 cd19cfa2 Huang Ying
                            MAP_PRIVATE;
2981 cd19cfa2 Huang Ying
#else
2982 cd19cfa2 Huang Ying
                        flags |= MAP_PRIVATE;
2983 cd19cfa2 Huang Ying
#endif
2984 cd19cfa2 Huang Ying
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2985 cd19cfa2 Huang Ying
                                    flags, block->fd, offset);
2986 cd19cfa2 Huang Ying
                    } else {
2987 cd19cfa2 Huang Ying
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2988 cd19cfa2 Huang Ying
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2989 cd19cfa2 Huang Ying
                                    flags, -1, 0);
2990 cd19cfa2 Huang Ying
                    }
2991 fd28aa13 Jan Kiszka
#else
2992 fd28aa13 Jan Kiszka
                    abort();
2993 cd19cfa2 Huang Ying
#endif
2994 cd19cfa2 Huang Ying
                } else {
2995 cd19cfa2 Huang Ying
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2996 cd19cfa2 Huang Ying
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
2997 cd19cfa2 Huang Ying
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2998 cd19cfa2 Huang Ying
                                flags, -1, 0);
2999 cd19cfa2 Huang Ying
#else
3000 cd19cfa2 Huang Ying
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3001 cd19cfa2 Huang Ying
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3002 cd19cfa2 Huang Ying
                                flags, -1, 0);
3003 cd19cfa2 Huang Ying
#endif
3004 cd19cfa2 Huang Ying
                }
3005 cd19cfa2 Huang Ying
                if (area != vaddr) {
3006 cd19cfa2 Huang Ying
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3007 cd19cfa2 Huang Ying
                            length, addr);
3008 cd19cfa2 Huang Ying
                    exit(1);
3009 cd19cfa2 Huang Ying
                }
3010 cd19cfa2 Huang Ying
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3011 cd19cfa2 Huang Ying
            }
3012 cd19cfa2 Huang Ying
            return;
3013 cd19cfa2 Huang Ying
        }
3014 cd19cfa2 Huang Ying
    }
3015 cd19cfa2 Huang Ying
}
3016 cd19cfa2 Huang Ying
#endif /* !_WIN32 */
3017 cd19cfa2 Huang Ying
3018 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
3019 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
3020 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
3021 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
3022 5579c7f3 pbrook

3023 5579c7f3 pbrook
   It should not be used for general purpose DMA.
3024 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3025 5579c7f3 pbrook
 */
3026 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
3027 dc828ca1 pbrook
{
3028 94a6b54f pbrook
    RAMBlock *block;
3029 94a6b54f pbrook
3030 f471a17e Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3031 f471a17e Alex Williamson
        if (addr - block->offset < block->length) {
3032 7d82af38 Vincent Palatin
            /* Move this entry to to start of the list.  */
3033 7d82af38 Vincent Palatin
            if (block != QLIST_FIRST(&ram_list.blocks)) {
3034 7d82af38 Vincent Palatin
                QLIST_REMOVE(block, next);
3035 7d82af38 Vincent Palatin
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3036 7d82af38 Vincent Palatin
            }
3037 f471a17e Alex Williamson
            return block->host + (addr - block->offset);
3038 f471a17e Alex Williamson
        }
3039 94a6b54f pbrook
    }
3040 f471a17e Alex Williamson
3041 f471a17e Alex Williamson
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3042 f471a17e Alex Williamson
    abort();
3043 f471a17e Alex Williamson
3044 f471a17e Alex Williamson
    return NULL;
3045 dc828ca1 pbrook
}
3046 dc828ca1 pbrook
3047 b2e0a138 Michael S. Tsirkin
/* Return a host pointer to ram allocated with qemu_ram_alloc.
3048 b2e0a138 Michael S. Tsirkin
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3049 b2e0a138 Michael S. Tsirkin
 */
3050 b2e0a138 Michael S. Tsirkin
void *qemu_safe_ram_ptr(ram_addr_t addr)
3051 b2e0a138 Michael S. Tsirkin
{
3052 b2e0a138 Michael S. Tsirkin
    RAMBlock *block;
3053 b2e0a138 Michael S. Tsirkin
3054 b2e0a138 Michael S. Tsirkin
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3055 b2e0a138 Michael S. Tsirkin
        if (addr - block->offset < block->length) {
3056 b2e0a138 Michael S. Tsirkin
            return block->host + (addr - block->offset);
3057 b2e0a138 Michael S. Tsirkin
        }
3058 b2e0a138 Michael S. Tsirkin
    }
3059 b2e0a138 Michael S. Tsirkin
3060 b2e0a138 Michael S. Tsirkin
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3061 b2e0a138 Michael S. Tsirkin
    abort();
3062 b2e0a138 Michael S. Tsirkin
3063 b2e0a138 Michael S. Tsirkin
    return NULL;
3064 b2e0a138 Michael S. Tsirkin
}
3065 b2e0a138 Michael S. Tsirkin
3066 e890261f Marcelo Tosatti
/* Translate a host pointer back into a ram address.
 * On success stores the offset into *ram_addr and returns 0; returns -1
 * when the pointer is not inside any registered RAM block.
 */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *b;
    uint8_t *p = ptr;

    QLIST_FOREACH(b, &ram_list.blocks, next) {
        if (p - b->host < b->length) {
            *ram_addr = b->offset + (p - b->host);
            return 0;
        }
    }

    return -1;
}
3079 f471a17e Alex Williamson
3080 e890261f Marcelo Tosatti
/* Some of the softmmu routines need to translate from a host pointer
3081 e890261f Marcelo Tosatti
   (typically a TLB entry) back to a ram offset.  */
3082 e890261f Marcelo Tosatti
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3083 e890261f Marcelo Tosatti
{
3084 e890261f Marcelo Tosatti
    ram_addr_t ram_addr;
3085 f471a17e Alex Williamson
3086 e890261f Marcelo Tosatti
    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3087 e890261f Marcelo Tosatti
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
3088 e890261f Marcelo Tosatti
        abort();
3089 e890261f Marcelo Tosatti
    }
3090 e890261f Marcelo Tosatti
    return ram_addr;
3091 5579c7f3 pbrook
}
3092 5579c7f3 pbrook
3093 c227f099 Anthony Liguori
/* 8-bit read from unassigned physical memory: optionally traced under
   DEBUG_UNASSIGNED; on SPARC/MicroBlaze it raises the target's
   unassigned-access fault, otherwise the read returns zero.  */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
3103 e18231a3 blueswir1
3104 c227f099 Anthony Liguori
/* 16-bit read from unassigned physical memory; see unassigned_mem_readb.  */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
3114 e18231a3 blueswir1
3115 c227f099 Anthony Liguori
/* 32-bit read from unassigned physical memory; see unassigned_mem_readb.  */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
3125 33417e70 bellard
3126 c227f099 Anthony Liguori
/* 8-bit write to unassigned physical memory: optionally traced; on
   SPARC/MicroBlaze raises the unassigned-access fault, otherwise the
   write is silently discarded.  */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
3135 e18231a3 blueswir1
3136 c227f099 Anthony Liguori
/* 16-bit write to unassigned physical memory; see unassigned_mem_writeb.  */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
3145 e18231a3 blueswir1
3146 c227f099 Anthony Liguori
/* 32-bit write to unassigned physical memory; see unassigned_mem_writeb.  */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
3155 33417e70 bellard
3156 d60efc6b Blue Swirl
/* Dispatch table (byte/word/long) for reads from unassigned memory.  */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
3161 33417e70 bellard
3162 d60efc6b Blue Swirl
/* Dispatch table (byte/word/long) for writes to unassigned memory.  */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3167 33417e70 bellard
3168 c227f099 Anthony Liguori
/* 8-bit write handler for RAM pages whose dirty bits are not all set:
   keeps code-dirty tracking consistent, performs the store, and drops
   the slow path once the page is fully dirty.  */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int flags = cpu_physical_memory_get_dirty_flags(ram_addr);

    if (!(flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* The page may hold translated code: invalidate it first.  */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, flags);
    /* Remove the notdirty callback only once the code has been flushed.  */
    if (flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}
3187 9fa3e853 bellard
3188 c227f099 Anthony Liguori
/* 16-bit variant of notdirty_mem_writeb.  */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int flags = cpu_physical_memory_get_dirty_flags(ram_addr);

    if (!(flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* The page may hold translated code: invalidate it first.  */
        tb_invalidate_phys_page_fast(ram_addr, 2);
        flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, flags);
    /* Remove the notdirty callback only once the code has been flushed.  */
    if (flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}
3207 9fa3e853 bellard
3208 c227f099 Anthony Liguori
/* 32-bit variant of notdirty_mem_writeb.  */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int flags = cpu_physical_memory_get_dirty_flags(ram_addr);

    if (!(flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* The page may hold translated code: invalidate it first.  */
        tb_invalidate_phys_page_fast(ram_addr, 4);
        flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, flags);
    /* Remove the notdirty callback only once the code has been flushed.  */
    if (flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}
3227 9fa3e853 bellard
3228 d60efc6b Blue Swirl
/* Placeholder read table for io slots that must never be read
   (e.g. the notdirty slot, which only intercepts writes).  */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
3233 9fa3e853 bellard
3234 d60efc6b Blue Swirl
/* Dispatch table (byte/word/long) for writes to not-yet-dirty RAM.  */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3239 1ccde1cb bellard
3240 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.
 *
 * offset:   byte offset of the access within the current io page
 * len_mask: mask describing the access width (~0x0 byte, ~0x1 word, ...)
 * flags:    BP_MEM_READ / BP_MEM_WRITE, matched against each watchpoint
 *
 * On a hit the current TB is invalidated and either EXCP_DEBUG is
 * raised or a single-instruction TB is generated, then execution
 * restarts via cpu_resume_from_signal (which does not return here).
 */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access.  */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Restore exact CPU state before invalidating the TB.  */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Re-translate just one instruction, then re-enter;
                       the re-entry path above raises the interrupt.  */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                /* Longjmps back into the cpu loop; does not return.  */
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3284 0f459d16 pbrook
3285 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  ~0x0 is the len_mask for a 1-byte access.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
3293 6658ffb8 pbrook
3294 c227f099 Anthony Liguori
/* 16-bit watchpoint-checked read; see watch_mem_readb.  */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3299 6658ffb8 pbrook
3300 c227f099 Anthony Liguori
/* 32-bit watchpoint-checked read; see watch_mem_readb.  */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3305 6658ffb8 pbrook
3306 c227f099 Anthony Liguori
/* 8-bit watchpoint-checked write; see watch_mem_readb.  */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
3312 6658ffb8 pbrook
3313 c227f099 Anthony Liguori
/* 16-bit watchpoint-checked write; see watch_mem_readb.  */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
3319 6658ffb8 pbrook
3320 c227f099 Anthony Liguori
/* 32-bit watchpoint-checked write; see watch_mem_readb.  */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
3326 6658ffb8 pbrook
3327 d60efc6b Blue Swirl
/* Dispatch table (byte/word/long) for watchpoint-checked reads.  */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3332 6658ffb8 pbrook
3333 d60efc6b Blue Swirl
/* Dispatch table (byte/word/long) for watchpoint-checked writes.  */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
3338 6658ffb8 pbrook
3339 f6405247 Richard Henderson
static inline uint32_t subpage_readlen (subpage_t *mmio,
3340 f6405247 Richard Henderson
                                        target_phys_addr_t addr,
3341 f6405247 Richard Henderson
                                        unsigned int len)
3342 db7b5426 blueswir1
{
3343 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3344 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3345 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3346 db7b5426 blueswir1
           mmio, len, addr, idx);
3347 db7b5426 blueswir1
#endif
3348 db7b5426 blueswir1
3349 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3350 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3351 f6405247 Richard Henderson
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3352 db7b5426 blueswir1
}
3353 db7b5426 blueswir1
3354 c227f099 Anthony Liguori
/* Forward a write of width (1 << len) bytes to the handler registered
   for the subpage slot containing addr.  */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int slot = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, slot, value);
#endif

    /* Rebase into the target region, then dispatch via its io index.  */
    addr += mmio->region_offset[slot];
    slot = mmio->sub_io_index[slot];
    io_mem_write[slot][len](io_mem_opaque[slot], addr, value);
}
3367 db7b5426 blueswir1
3368 c227f099 Anthony Liguori
/* Byte-wide subpage read; len index 0 selects 1-byte accesses.  */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}
3372 db7b5426 blueswir1
3373 c227f099 Anthony Liguori
/* Byte-wide subpage write; len index 0 selects 1-byte accesses.  */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}
3378 db7b5426 blueswir1
3379 c227f099 Anthony Liguori
/* Word-wide subpage read; len index 1 selects 2-byte accesses.  */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}
3383 db7b5426 blueswir1
3384 c227f099 Anthony Liguori
/* Word-wide subpage write; len index 1 selects 2-byte accesses.  */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}
3389 db7b5426 blueswir1
3390 c227f099 Anthony Liguori
/* Long-wide subpage read; len index 2 selects 4-byte accesses.  */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}
3394 db7b5426 blueswir1
3395 f6405247 Richard Henderson
/* Long-wide subpage write; len index 2 selects 4-byte accesses.  */
static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3400 db7b5426 blueswir1
3401 d60efc6b Blue Swirl
/* Dispatch table (byte/word/long) for subpage reads.  */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3406 db7b5426 blueswir1
3407 d60efc6b Blue Swirl
/* Dispatch table (byte/word/long) for subpage writes.  */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3412 db7b5426 blueswir1
3413 c227f099 Anthony Liguori
/* Route the subpage byte range [start, end] to the given io-memory
   handler with the given region offset.  Returns 0 on success, -1 if
   either bound falls outside the page.  */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int first, last, i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    first = SUBPAGE_IDX(start);
    last = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, first, last, memory);
#endif
    /* Direct RAM cannot be represented at subpage granularity.  */
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        memory = IO_MEM_UNASSIGNED;
    }
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (i = first; i <= last; i++) {
        mmio->sub_io_index[i] = memory;
        mmio->region_offset[i] = region_offset;
    }

    return 0;
}
3436 db7b5426 blueswir1
3437 f6405247 Richard Henderson
/* Allocate and register a subpage container covering one target page.
   *phys receives the new io index tagged with IO_MEM_SUBPAGE; the whole
   page is initially routed to orig_memory/region_offset.  */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio = qemu_mallocz(sizeof(subpage_t));
    int io_index;

    mmio->base = base;
    io_index = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                      DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, io_index);
#endif
    *phys = io_index | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3458 db7b5426 blueswir1
3459 88715657 aliguori
static int get_free_io_mem_idx(void)
3460 88715657 aliguori
{
3461 88715657 aliguori
    int i;
3462 88715657 aliguori
3463 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3464 88715657 aliguori
        if (!io_mem_used[i]) {
3465 88715657 aliguori
            io_mem_used[i] = 1;
3466 88715657 aliguori
            return i;
3467 88715657 aliguori
        }
3468 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3469 88715657 aliguori
    return -1;
3470 88715657 aliguori
}
3471 88715657 aliguori
3472 dd310534 Alexander Graf
/*
3473 dd310534 Alexander Graf
 * Usually, devices operate in little endian mode. There are devices out
3474 dd310534 Alexander Graf
 * there that operate in big endian too. Each device gets byte swapped
3475 dd310534 Alexander Graf
 * mmio if plugged onto a CPU that does the other endianness.
3476 dd310534 Alexander Graf
 *
3477 dd310534 Alexander Graf
 * CPU          Device           swap?
3478 dd310534 Alexander Graf
 *
3479 dd310534 Alexander Graf
 * little       little           no
3480 dd310534 Alexander Graf
 * little       big              yes
3481 dd310534 Alexander Graf
 * big          little           yes
3482 dd310534 Alexander Graf
 * big          big              no
3483 dd310534 Alexander Graf
 */
3484 dd310534 Alexander Graf
3485 dd310534 Alexander Graf
/* Saved state for an io slot wrapped for byte swapping: the original
   b/w/l read and write callbacks plus their opaque pointer.  */
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;
3490 dd310534 Alexander Graf
3491 dd310534 Alexander Graf
/* Byte read through a swap-endian wrapper: single bytes have no
   endianness, so forward unchanged.  */
static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    SwapEndianContainer *c = opaque;

    return c->read[0](c->opaque, addr);
}
3498 dd310534 Alexander Graf
3499 dd310534 Alexander Graf
/* 16-bit read through a swap-endian wrapper: byte-swap the result of
   the original handler.  */
static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    SwapEndianContainer *c = opaque;

    return bswap16(c->read[1](c->opaque, addr));
}
3506 dd310534 Alexander Graf
3507 dd310534 Alexander Graf
/* 32-bit read through a swap-endian wrapper: byte-swap the result of
   the original handler.  */
static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    SwapEndianContainer *c = opaque;

    return bswap32(c->read[2](c->opaque, addr));
}
3514 dd310534 Alexander Graf
3515 dd310534 Alexander Graf
/* Dispatch table (byte/word/long) for byte-swapping reads.  */
static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};
3520 dd310534 Alexander Graf
3521 dd310534 Alexander Graf
/* Byte write through a swap-endian wrapper: no swap needed for a
   single byte; forward to the original handler.  */
static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}
3527 dd310534 Alexander Graf
3528 dd310534 Alexander Graf
/* 16-bit write through a swap-endian wrapper: byte-swap the value
   before handing it to the original handler.  */
static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}
3534 dd310534 Alexander Graf
3535 dd310534 Alexander Graf
/* 32-bit write through a swap-endian wrapper: byte-swap the value
   before handing it to the original handler.  */
static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}
3541 dd310534 Alexander Graf
3542 dd310534 Alexander Graf
/* Dispatch table (byte/word/long) for byte-swapping writes.  */
static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};
3547 dd310534 Alexander Graf
3548 dd310534 Alexander Graf
static void swapendian_init(int io_index)
3549 dd310534 Alexander Graf
{
3550 dd310534 Alexander Graf
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3551 dd310534 Alexander Graf
    int i;
3552 dd310534 Alexander Graf
3553 dd310534 Alexander Graf
    /* Swap mmio for big endian targets */
3554 dd310534 Alexander Graf
    c->opaque = io_mem_opaque[io_index];
3555 dd310534 Alexander Graf
    for (i = 0; i < 3; i++) {
3556 dd310534 Alexander Graf
        c->read[i] = io_mem_read[io_index][i];
3557 dd310534 Alexander Graf
        c->write[i] = io_mem_write[io_index][i];
3558 dd310534 Alexander Graf
3559 dd310534 Alexander Graf
        io_mem_read[io_index][i] = swapendian_readfn[i];
3560 dd310534 Alexander Graf
        io_mem_write[io_index][i] = swapendian_writefn[i];
3561 dd310534 Alexander Graf
    }
3562 dd310534 Alexander Graf
    io_mem_opaque[io_index] = c;
3563 dd310534 Alexander Graf
}
3564 dd310534 Alexander Graf
3565 dd310534 Alexander Graf
static void swapendian_del(int io_index)
3566 dd310534 Alexander Graf
{
3567 dd310534 Alexander Graf
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3568 dd310534 Alexander Graf
        qemu_free(io_mem_opaque[io_index]);
3569 dd310534 Alexander Graf
    }
3570 dd310534 Alexander Graf
}
3571 dd310534 Alexander Graf
3572 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3573 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3574 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3575 3ee89922 blueswir1
   If io_index is non zero, the corresponding io zone is
3576 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3577 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3578 4254fab8 blueswir1
   returned if error. */
3579 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
3580 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
3581 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
3582 dd310534 Alexander Graf
                                        void *opaque, enum device_endian endian)
3583 33417e70 bellard
{
3584 3cab721d Richard Henderson
    int i;
3585 3cab721d Richard Henderson
3586 33417e70 bellard
    if (io_index <= 0) {
3587 88715657 aliguori
        io_index = get_free_io_mem_idx();
3588 88715657 aliguori
        if (io_index == -1)
3589 88715657 aliguori
            return io_index;
3590 33417e70 bellard
    } else {
3591 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
3592 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3593 33417e70 bellard
            return -1;
3594 33417e70 bellard
    }
3595 b5ff1b31 bellard
3596 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3597 3cab721d Richard Henderson
        io_mem_read[io_index][i]
3598 3cab721d Richard Henderson
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3599 3cab721d Richard Henderson
    }
3600 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3601 3cab721d Richard Henderson
        io_mem_write[io_index][i]
3602 3cab721d Richard Henderson
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3603 3cab721d Richard Henderson
    }
3604 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3605 f6405247 Richard Henderson
3606 dd310534 Alexander Graf
    switch (endian) {
3607 dd310534 Alexander Graf
    case DEVICE_BIG_ENDIAN:
3608 dd310534 Alexander Graf
#ifndef TARGET_WORDS_BIGENDIAN
3609 dd310534 Alexander Graf
        swapendian_init(io_index);
3610 dd310534 Alexander Graf
#endif
3611 dd310534 Alexander Graf
        break;
3612 dd310534 Alexander Graf
    case DEVICE_LITTLE_ENDIAN:
3613 dd310534 Alexander Graf
#ifdef TARGET_WORDS_BIGENDIAN
3614 dd310534 Alexander Graf
        swapendian_init(io_index);
3615 dd310534 Alexander Graf
#endif
3616 dd310534 Alexander Graf
        break;
3617 dd310534 Alexander Graf
    case DEVICE_NATIVE_ENDIAN:
3618 dd310534 Alexander Graf
    default:
3619 dd310534 Alexander Graf
        break;
3620 dd310534 Alexander Graf
    }
3621 dd310534 Alexander Graf
3622 f6405247 Richard Henderson
    return (io_index << IO_MEM_SHIFT);
3623 33417e70 bellard
}
3624 61382a50 bellard
3625 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3626 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3627 dd310534 Alexander Graf
                           void *opaque, enum device_endian endian)
3628 1eed09cb Avi Kivity
{
3629 2507c12a Alexander Graf
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
3630 1eed09cb Avi Kivity
}
3631 1eed09cb Avi Kivity
3632 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3633 88715657 aliguori
{
3634 88715657 aliguori
    int i;
3635 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3636 88715657 aliguori
3637 dd310534 Alexander Graf
    swapendian_del(io_index);
3638 dd310534 Alexander Graf
3639 88715657 aliguori
    for (i=0;i < 3; i++) {
3640 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3641 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3642 88715657 aliguori
    }
3643 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3644 88715657 aliguori
    io_mem_used[io_index] = 0;
3645 88715657 aliguori
}
3646 88715657 aliguori
3647 e9179ce1 Avi Kivity
static void io_mem_init(void)
3648 e9179ce1 Avi Kivity
{
3649 e9179ce1 Avi Kivity
    int i;
3650 e9179ce1 Avi Kivity
3651 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3652 2507c12a Alexander Graf
                                 unassigned_mem_write, NULL,
3653 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3654 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3655 2507c12a Alexander Graf
                                 unassigned_mem_write, NULL,
3656 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3657 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3658 2507c12a Alexander Graf
                                 notdirty_mem_write, NULL,
3659 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3660 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3661 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3662 e9179ce1 Avi Kivity
3663 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3664 2507c12a Alexander Graf
                                          watch_mem_write, NULL,
3665 2507c12a Alexander Graf
                                          DEVICE_NATIVE_ENDIAN);
3666 e9179ce1 Avi Kivity
}
3667 e9179ce1 Avi Kivity
3668 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3669 e2eef170 pbrook
3670 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3671 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3672 a68fe89c Paul Brook
/* Debugger access to guest virtual memory (user-mode emulation).
 * Copies len bytes between buf and guest address addr, one guest page
 * at a time, honouring the page protection flags.
 * Returns 0 on success, -1 if any page in the range is invalid or
 * lacks the required read/write permission.
 */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        /* Clamp this iteration to the remainder of the current page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3710 8df1cd07 bellard
3711 13eb76e0 bellard
#else
3712 c227f099 Anthony Liguori
/* Copy data between a caller buffer and guest physical memory.
 * is_write != 0 writes from buf into guest memory, otherwise reads
 * into buf.  Each page is dispatched either to RAM (plain memcpy,
 * with code invalidation and dirty tracking on writes) or to a
 * registered MMIO handler, in which case the transfer is split into
 * aligned 4/2/1 byte accesses.
 */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Clamp this iteration to the remainder of the current page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write: use the per-slot handlers. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3808 8df1cd07 bellard
3809 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
/* Like the write half of cpu_physical_memory_rw() but without dirty
 * tracking or code invalidation, and able to write into ROM/ROMD
 * pages.  Writes to unassigned or pure MMIO pages are silently
 * skipped.
 */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Clamp this iteration to the remainder of the current page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3847 d0ecd2aa bellard
3848 6d16c2f8 aliguori
/* Bookkeeping for the bounce buffer used by cpu_physical_memory_map()
   when the requested region is not directly-mappable RAM. */
typedef struct {
    void *buffer;              /* allocated buffer, NULL when not in use */
    target_phys_addr_t addr;   /* guest physical address being bounced */
    target_phys_addr_t len;    /* length of the bounced region */
} BounceBuffer;
3853 6d16c2f8 aliguori
3854 6d16c2f8 aliguori
/* Single global bounce buffer: only one non-RAM mapping may be
   outstanding at a time (bounce.buffer != NULL while mapped). */
static BounceBuffer bounce;
3855 6d16c2f8 aliguori
3856 ba223c29 aliguori
/* A caller waiting to retry cpu_physical_memory_map(); its callback is
   invoked from cpu_notify_map_clients() when the bounce buffer frees. */
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;
3861 ba223c29 aliguori
3862 72cf2d4f Blue Swirl
/* List of clients to notify when the bounce buffer becomes free. */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3864 ba223c29 aliguori
3865 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3866 ba223c29 aliguori
{
3867 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3868 ba223c29 aliguori
3869 ba223c29 aliguori
    client->opaque = opaque;
3870 ba223c29 aliguori
    client->callback = callback;
3871 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3872 ba223c29 aliguori
    return client;
3873 ba223c29 aliguori
}
3874 ba223c29 aliguori
3875 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3876 ba223c29 aliguori
{
3877 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3878 ba223c29 aliguori
3879 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3880 34d5e948 Isaku Yamahata
    qemu_free(client);
3881 ba223c29 aliguori
}
3882 ba223c29 aliguori
3883 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3884 ba223c29 aliguori
{
3885 ba223c29 aliguori
    MapClient *client;
3886 ba223c29 aliguori
3887 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3888 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3889 ba223c29 aliguori
        client->callback(client->opaque);
3890 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3891 ba223c29 aliguori
    }
3892 ba223c29 aliguori
}
3893 ba223c29 aliguori
3894 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* Clamp this iteration to the remainder of the current page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not directly-mappable RAM: fall back to the (single,
               page-sized) bounce buffer, and only if nothing has been
               mapped yet and the buffer is free. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the buffer so the caller can read from it. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host pointers are no longer contiguous: stop here and
               return the shorter mapping via *plen. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3955 6d16c2f8 aliguori
3956 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: on writes, invalidate translated code
           and update dirty bits page by page. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce buffer: flush the caller's writes back to guest memory,
       free the buffer, and wake anyone waiting to map. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
3991 d0ecd2aa bellard
3992 8df1cd07 bellard
/* warning: addr must be aligned */
/* Load a 32 bit value from guest physical memory, dispatching to the
 * MMIO read handler for I/O pages and reading directly for RAM/ROMD.
 */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
4023 8df1cd07 bellard
4024 84b7b8e7 bellard
/* warning: addr must be aligned */
/* Load a 64 bit value from guest physical memory.  For I/O pages the
 * load is performed as two 32 bit MMIO reads (ordered per target
 * endianness), so it is not atomic with respect to the device.
 */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
4061 84b7b8e7 bellard
4062 aab33094 bellard
/* XXX: optimize */
4063 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
4064 aab33094 bellard
{
4065 aab33094 bellard
    uint8_t val;
4066 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
4067 aab33094 bellard
    return val;
4068 aab33094 bellard
}
4069 aab33094 bellard
4070 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
4071 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
4072 aab33094 bellard
{
4073 733f0b02 Michael S. Tsirkin
    int io_index;
4074 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
4075 733f0b02 Michael S. Tsirkin
    uint64_t val;
4076 733f0b02 Michael S. Tsirkin
    unsigned long pd;
4077 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
4078 733f0b02 Michael S. Tsirkin
4079 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4080 733f0b02 Michael S. Tsirkin
    if (!p) {
4081 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
4082 733f0b02 Michael S. Tsirkin
    } else {
4083 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
4084 733f0b02 Michael S. Tsirkin
    }
4085 733f0b02 Michael S. Tsirkin
4086 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4087 733f0b02 Michael S. Tsirkin
        !(pd & IO_MEM_ROMD)) {
4088 733f0b02 Michael S. Tsirkin
        /* I/O case */
4089 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4090 733f0b02 Michael S. Tsirkin
        if (p)
4091 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4092 733f0b02 Michael S. Tsirkin
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4093 733f0b02 Michael S. Tsirkin
    } else {
4094 733f0b02 Michael S. Tsirkin
        /* RAM case */
4095 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4096 733f0b02 Michael S. Tsirkin
            (addr & ~TARGET_PAGE_MASK);
4097 733f0b02 Michael S. Tsirkin
        val = lduw_p(ptr);
4098 733f0b02 Michael S. Tsirkin
    }
4099 733f0b02 Michael S. Tsirkin
    return val;
4100 aab33094 bellard
}
4101 aab33094 bellard
4102 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: dispatch to the MMIO long-write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* RAM case: write directly, skipping the usual dirty marking. */
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration, dirty tracking is still needed so the
           modified page gets re-sent. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4140 8df1cd07 bellard
4141 c227f099 Anthony Liguori
/* warning: addr must be aligned.  64-bit variant of stl_phys_notdirty:
   stores without marking the RAM page dirty.  On I/O pages the access is
   split into two 32-bit writes in guest byte order. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* Emit the two halves in guest memory order. */
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM: single 64-bit store through the host mapping. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
4172 bc98a7ef j_mayer
4173 8df1cd07 bellard
/* warning: addr must be aligned */
4174 c227f099 Anthony Liguori
void stl_phys(target_phys_addr_t addr, uint32_t val)
4175 8df1cd07 bellard
{
4176 8df1cd07 bellard
    int io_index;
4177 8df1cd07 bellard
    uint8_t *ptr;
4178 8df1cd07 bellard
    unsigned long pd;
4179 8df1cd07 bellard
    PhysPageDesc *p;
4180 8df1cd07 bellard
4181 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4182 8df1cd07 bellard
    if (!p) {
4183 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
4184 8df1cd07 bellard
    } else {
4185 8df1cd07 bellard
        pd = p->phys_offset;
4186 8df1cd07 bellard
    }
4187 3b46e624 ths
4188 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4189 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4190 8da3ff18 pbrook
        if (p)
4191 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4192 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4193 8df1cd07 bellard
    } else {
4194 8df1cd07 bellard
        unsigned long addr1;
4195 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4196 8df1cd07 bellard
        /* RAM case */
4197 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
4198 8df1cd07 bellard
        stl_p(ptr, val);
4199 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
4200 3a7d929e bellard
            /* invalidate code */
4201 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4202 3a7d929e bellard
            /* set dirty bit */
4203 f7c11b53 Yoshiaki Tamura
            cpu_physical_memory_set_dirty_flags(addr1,
4204 f7c11b53 Yoshiaki Tamura
                (0xff & ~CODE_DIRTY_FLAG));
4205 3a7d929e bellard
        }
4206 8df1cd07 bellard
    }
4207 8df1cd07 bellard
}
4208 8df1cd07 bellard
4209 aab33094 bellard
/* XXX: optimize */
4210 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
4211 aab33094 bellard
{
4212 aab33094 bellard
    uint8_t v = val;
4213 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
4214 aab33094 bellard
}
4215 aab33094 bellard
4216 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
4217 c227f099 Anthony Liguori
void stw_phys(target_phys_addr_t addr, uint32_t val)
4218 aab33094 bellard
{
4219 733f0b02 Michael S. Tsirkin
    int io_index;
4220 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
4221 733f0b02 Michael S. Tsirkin
    unsigned long pd;
4222 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
4223 733f0b02 Michael S. Tsirkin
4224 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4225 733f0b02 Michael S. Tsirkin
    if (!p) {
4226 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
4227 733f0b02 Michael S. Tsirkin
    } else {
4228 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
4229 733f0b02 Michael S. Tsirkin
    }
4230 733f0b02 Michael S. Tsirkin
4231 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4232 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4233 733f0b02 Michael S. Tsirkin
        if (p)
4234 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4235 733f0b02 Michael S. Tsirkin
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4236 733f0b02 Michael S. Tsirkin
    } else {
4237 733f0b02 Michael S. Tsirkin
        unsigned long addr1;
4238 733f0b02 Michael S. Tsirkin
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4239 733f0b02 Michael S. Tsirkin
        /* RAM case */
4240 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(addr1);
4241 733f0b02 Michael S. Tsirkin
        stw_p(ptr, val);
4242 733f0b02 Michael S. Tsirkin
        if (!cpu_physical_memory_is_dirty(addr1)) {
4243 733f0b02 Michael S. Tsirkin
            /* invalidate code */
4244 733f0b02 Michael S. Tsirkin
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4245 733f0b02 Michael S. Tsirkin
            /* set dirty bit */
4246 733f0b02 Michael S. Tsirkin
            cpu_physical_memory_set_dirty_flags(addr1,
4247 733f0b02 Michael S. Tsirkin
                (0xff & ~CODE_DIRTY_FLAG));
4248 733f0b02 Michael S. Tsirkin
        }
4249 733f0b02 Michael S. Tsirkin
    }
4250 aab33094 bellard
}
4251 aab33094 bellard
4252 aab33094 bellard
/* XXX: optimize */
4253 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
4254 aab33094 bellard
{
4255 aab33094 bellard
    val = tswap64(val);
4256 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4257 aab33094 bellard
}
4258 aab33094 bellard
4259 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
4260 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4261 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
4262 13eb76e0 bellard
{
4263 13eb76e0 bellard
    int l;
4264 c227f099 Anthony Liguori
    target_phys_addr_t phys_addr;
4265 9b3c35e0 j_mayer
    target_ulong page;
4266 13eb76e0 bellard
4267 13eb76e0 bellard
    while (len > 0) {
4268 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
4269 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
4270 13eb76e0 bellard
        /* if no physical page mapped, return an error */
4271 13eb76e0 bellard
        if (phys_addr == -1)
4272 13eb76e0 bellard
            return -1;
4273 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
4274 13eb76e0 bellard
        if (l > len)
4275 13eb76e0 bellard
            l = len;
4276 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
4277 5e2972fd aliguori
        if (is_write)
4278 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
4279 5e2972fd aliguori
        else
4280 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4281 13eb76e0 bellard
        len -= l;
4282 13eb76e0 bellard
        buf += l;
4283 13eb76e0 bellard
        addr += l;
4284 13eb76e0 bellard
    }
4285 13eb76e0 bellard
    return 0;
4286 13eb76e0 bellard
}
4287 a68fe89c Paul Brook
#endif
4288 13eb76e0 bellard
4289 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
4290 2e70f6ef pbrook
   must be at the end of the TB */
4291 2e70f6ef pbrook
void cpu_io_recompile(CPUState *env, void *retaddr)
4292 2e70f6ef pbrook
{
4293 2e70f6ef pbrook
    TranslationBlock *tb;
4294 2e70f6ef pbrook
    uint32_t n, cflags;
4295 2e70f6ef pbrook
    target_ulong pc, cs_base;
4296 2e70f6ef pbrook
    uint64_t flags;
4297 2e70f6ef pbrook
4298 2e70f6ef pbrook
    tb = tb_find_pc((unsigned long)retaddr);
4299 2e70f6ef pbrook
    if (!tb) {
4300 2e70f6ef pbrook
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
4301 2e70f6ef pbrook
                  retaddr);
4302 2e70f6ef pbrook
    }
4303 2e70f6ef pbrook
    n = env->icount_decr.u16.low + tb->icount;
4304 2e70f6ef pbrook
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4305 2e70f6ef pbrook
    /* Calculate how many instructions had been executed before the fault
4306 bf20dc07 ths
       occurred.  */
4307 2e70f6ef pbrook
    n = n - env->icount_decr.u16.low;
4308 2e70f6ef pbrook
    /* Generate a new TB ending on the I/O insn.  */
4309 2e70f6ef pbrook
    n++;
4310 2e70f6ef pbrook
    /* On MIPS and SH, delay slot instructions can only be restarted if
4311 2e70f6ef pbrook
       they were already the first instruction in the TB.  If this is not
4312 bf20dc07 ths
       the first instruction in a TB then re-execute the preceding
4313 2e70f6ef pbrook
       branch.  */
4314 2e70f6ef pbrook
#if defined(TARGET_MIPS)
4315 2e70f6ef pbrook
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4316 2e70f6ef pbrook
        env->active_tc.PC -= 4;
4317 2e70f6ef pbrook
        env->icount_decr.u16.low++;
4318 2e70f6ef pbrook
        env->hflags &= ~MIPS_HFLAG_BMASK;
4319 2e70f6ef pbrook
    }
4320 2e70f6ef pbrook
#elif defined(TARGET_SH4)
4321 2e70f6ef pbrook
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4322 2e70f6ef pbrook
            && n > 1) {
4323 2e70f6ef pbrook
        env->pc -= 2;
4324 2e70f6ef pbrook
        env->icount_decr.u16.low++;
4325 2e70f6ef pbrook
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4326 2e70f6ef pbrook
    }
4327 2e70f6ef pbrook
#endif
4328 2e70f6ef pbrook
    /* This should never happen.  */
4329 2e70f6ef pbrook
    if (n > CF_COUNT_MASK)
4330 2e70f6ef pbrook
        cpu_abort(env, "TB too big during recompile");
4331 2e70f6ef pbrook
4332 2e70f6ef pbrook
    cflags = n | CF_LAST_IO;
4333 2e70f6ef pbrook
    pc = tb->pc;
4334 2e70f6ef pbrook
    cs_base = tb->cs_base;
4335 2e70f6ef pbrook
    flags = tb->flags;
4336 2e70f6ef pbrook
    tb_phys_invalidate(tb, -1);
4337 2e70f6ef pbrook
    /* FIXME: In theory this could raise an exception.  In practice
4338 2e70f6ef pbrook
       we have already translated the block once so it's probably ok.  */
4339 2e70f6ef pbrook
    tb_gen_code(env, pc, cs_base, flags, cflags);
4340 bf20dc07 ths
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4341 2e70f6ef pbrook
       the first in the TB) then we end up generating a whole new TB and
4342 2e70f6ef pbrook
       repeating the fault, which is horribly inefficient.
4343 2e70f6ef pbrook
       Better would be to execute just this insn uncached, or generate a
4344 2e70f6ef pbrook
       second new TB.  */
4345 2e70f6ef pbrook
    cpu_resume_from_signal(env, NULL);
4346 2e70f6ef pbrook
}
4347 2e70f6ef pbrook
4348 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
4349 b3755a91 Paul Brook
4350 055403b2 Stefan Weil
/* Print translation-buffer and TB statistics to F via CPU_FPRINTF. */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, tgt_size_total, tgt_size_max;
    int djmp_count, djmp2_count, xpage_count;
    TranslationBlock *tb;

    tgt_size_total = 0;
    tgt_size_max = 0;
    xpage_count = 0;
    djmp_count = 0;
    djmp2_count = 0;
    /* Accumulate per-TB statistics. */
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        tgt_size_total += tb->size;
        if (tb->size > tgt_size_max) {
            tgt_size_max = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            xpage_count++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            djmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                djmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n", 
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? tgt_size_total / nb_tbs : 0,
                tgt_size_max);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                tgt_size_total ? (double) (code_gen_ptr - code_gen_buffer) / tgt_size_total : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            xpage_count,
            nb_tbs ? (xpage_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                djmp_count,
                nb_tbs ? (djmp_count * 100) / nb_tbs : 0,
                djmp2_count,
                nb_tbs ? (djmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
4401 e3db7226 bellard
4402 61382a50 bellard
/* Instantiate the softmmu code-access helpers (_cmmu suffix) for each
   access size: SHIFT selects 1 << SHIFT bytes per access (0=byte,
   1=word, 2=long, 3=quad).  SOFTMMU_CODE_ACCESS selects the read-only
   code-fetch variants; GETPC() is NULL since there is no guest return
   address to restore here. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif