exec.c @ 5de6b46d
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "exec-all.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
179 83fb7adf bellard
unsigned long qemu_real_host_page_size;
180 83fb7adf bellard
unsigned long qemu_host_page_bits;
181 83fb7adf bellard
unsigned long qemu_host_page_size;
182 83fb7adf bellard
unsigned long qemu_host_page_mask;
183 54936004 bellard
184 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
185 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
186 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
187 54936004 bellard
188 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
189 41c1b1c9 Paul Brook
typedef struct PhysPageDesc {
190 41c1b1c9 Paul Brook
    /* offset in host memory of the page + io_index in the low bits */
191 41c1b1c9 Paul Brook
    ram_addr_t phys_offset;
192 41c1b1c9 Paul Brook
    ram_addr_t region_offset;
193 41c1b1c9 Paul Brook
} PhysPageDesc;
194 41c1b1c9 Paul Brook
195 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
196 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
197 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
198 6d9a1304 Paul Brook
199 e2eef170 pbrook
static void io_mem_init(void);
200 e2eef170 pbrook
201 33417e70 bellard
/* io memory support */
202 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
203 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
204 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
205 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
206 6658ffb8 pbrook
static int io_mem_watch;
207 6658ffb8 pbrook
#endif
208 33417e70 bellard
209 34865134 bellard
/* log support */
210 1e8b27ca Juha Riihimรคki
#ifdef WIN32
211 1e8b27ca Juha Riihimรคki
static const char *logfilename = "qemu.log";
212 1e8b27ca Juha Riihimรคki
#else
213 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
214 1e8b27ca Juha Riihimรคki
#endif
215 34865134 bellard
FILE *logfile;
216 34865134 bellard
int loglevel;
217 e735b91c pbrook
static int log_append = 0;
218 34865134 bellard
219 e3db7226 bellard
/* statistics */
220 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
221 e3db7226 bellard
static int tlb_flush_count;
222 b3755a91 Paul Brook
#endif
223 e3db7226 bellard
static int tb_flush_count;
224 e3db7226 bellard
static int tb_phys_invalidate_count;
225 e3db7226 bellard
226 7cb69cae bellard
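/* Make a host memory range executable: VirtualProtect on Win32,
   mprotect elsewhere (with the range widened to host page boundaries). */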
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

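/* Walk the multi-level map for a guest page index, optionally
   allocating missing intermediate tables on the way down.  With the
   64-bit example above, the loop below walks the intermediate 10-bit
   levels and the tail handles the final level of PageDesc arrays. */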
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
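/* Same walk as page_find_alloc, but over l1_phys_map.  The bottom
   level is allocated with qemu_malloc and every entry is explicitly
   initialized: the page is marked unassigned (IO_MEM_UNASSIGNED) and
   its default region_offset is recorded. */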
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

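/* Reserve the buffer that will hold the generated host code.  The
   per-host placement constraints below keep the buffer within direct
   call/branch range of the prologue on hosts with limited branch
   reach; code_gen_buffer_max_size leaves slack for one worst-case TB
   (TCG_MAX_OP_SIZE * OPC_MAX_SIZE) so tb_alloc can bound-check
   before translation starts. */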
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for a single-use temporary TB.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
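/* Note the bump-allocator design above: TB descriptors come from the
   tbs array and host code from code_gen_ptr, so only the most
   recently generated TB can be reclaimed; everything else is
   recovered in bulk by tb_flush(). */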

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

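/* In the page_next[] and jmp_next[] lists used below, the low two
   bits of each pointer encode which slot (0 or 1) of the pointed-to
   TB holds the next link; the value 2 marks the end of a list (see
   the "fail safe" tb->jmp_first = tb | 2 in tb_phys_invalidate). */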
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

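/* Worked example: set_bits(tab, 5, 9) covers bits 5..13, setting
   bits 5..7 of tab[0] (mask 0xe0) and bits 0..5 of tab[1] (mask 0x3f). */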
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

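/* Build the SMC bitmap for a page: one bit per byte of the page
   (TARGET_PAGE_SIZE / 8 bytes of bitmap), set for every byte covered
   by a translated block.  tb_invalidate_phys_page_fast consults it
   to skip invalidation when a write touches no translated code. */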
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

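/* Translate one block starting at pc.  Since a TB's guest code may
   cross a page boundary, both phys_pc and (when needed) phys_page2
   are resolved so tb_link_page can link the TB on both pages. */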
TranslationBlock *tb_gen_code(CPUState *env,
970 2e70f6ef pbrook
                              target_ulong pc, target_ulong cs_base,
971 2e70f6ef pbrook
                              int flags, int cflags)
972 d720b93d bellard
{
973 d720b93d bellard
    TranslationBlock *tb;
974 d720b93d bellard
    uint8_t *tc_ptr;
975 41c1b1c9 Paul Brook
    tb_page_addr_t phys_pc, phys_page2;
976 41c1b1c9 Paul Brook
    target_ulong virt_page2;
977 d720b93d bellard
    int code_gen_size;
978 d720b93d bellard
979 41c1b1c9 Paul Brook
    phys_pc = get_page_addr_code(env, pc);
980 c27004ec bellard
    tb = tb_alloc(pc);
981 d720b93d bellard
    if (!tb) {
982 d720b93d bellard
        /* flush must be done */
983 d720b93d bellard
        tb_flush(env);
984 d720b93d bellard
        /* cannot fail at this point */
985 c27004ec bellard
        tb = tb_alloc(pc);
986 2e70f6ef pbrook
        /* Don't forget to invalidate previous TB info.  */
987 2e70f6ef pbrook
        tb_invalidated_flag = 1;
988 d720b93d bellard
    }
989 d720b93d bellard
    tc_ptr = code_gen_ptr;
990 d720b93d bellard
    tb->tc_ptr = tc_ptr;
991 d720b93d bellard
    tb->cs_base = cs_base;
992 d720b93d bellard
    tb->flags = flags;
993 d720b93d bellard
    tb->cflags = cflags;
994 d07bde88 blueswir1
    cpu_gen_code(env, tb, &code_gen_size);
995 d720b93d bellard
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
996 3b46e624 ths
997 d720b93d bellard
    /* check next page if needed */
998 c27004ec bellard
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
999 d720b93d bellard
    phys_page2 = -1;
1000 c27004ec bellard
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1001 41c1b1c9 Paul Brook
        phys_page2 = get_page_addr_code(env, virt_page2);
1002 d720b93d bellard
    }
1003 41c1b1c9 Paul Brook
    tb_link_page(tb, phys_pc, phys_page2);
1004 2e70f6ef pbrook
    return tb;
1005 d720b93d bellard
}
1006 3b46e624 ths
1007 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
1008 9fa3e853 bellard
   starting in range [start;end[. NOTE: start and end must refer to
1009 d720b93d bellard
   the same physical page. 'is_cpu_write_access' should be true if called
1010 d720b93d bellard
   from a real cpu write access: the virtual CPU will exit the current
1011 d720b93d bellard
   TB if code is modified inside this TB. */
1012 41c1b1c9 Paul Brook
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1013 d720b93d bellard
                                   int is_cpu_write_access)
1014 d720b93d bellard
{
1015 6b917547 aliguori
    TranslationBlock *tb, *tb_next, *saved_tb;
1016 d720b93d bellard
    CPUState *env = cpu_single_env;
1017 41c1b1c9 Paul Brook
    tb_page_addr_t tb_start, tb_end;
1018 6b917547 aliguori
    PageDesc *p;
1019 6b917547 aliguori
    int n;
1020 6b917547 aliguori
#ifdef TARGET_HAS_PRECISE_SMC
1021 6b917547 aliguori
    int current_tb_not_found = is_cpu_write_access;
1022 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1023 6b917547 aliguori
    int current_tb_modified = 0;
1024 6b917547 aliguori
    target_ulong current_pc = 0;
1025 6b917547 aliguori
    target_ulong current_cs_base = 0;
1026 6b917547 aliguori
    int current_flags = 0;
1027 6b917547 aliguori
#endif /* TARGET_HAS_PRECISE_SMC */
1028 9fa3e853 bellard
1029 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1030 5fafdf24 ths
    if (!p)
1031 9fa3e853 bellard
        return;
1032 5fafdf24 ths
    if (!p->code_bitmap &&
1033 d720b93d bellard
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1034 d720b93d bellard
        is_cpu_write_access) {
1035 9fa3e853 bellard
        /* build code bitmap */
1036 9fa3e853 bellard
        build_page_bitmap(p);
1037 9fa3e853 bellard
    }
1038 9fa3e853 bellard
1039 9fa3e853 bellard
    /* we remove all the TBs in the range [start, end[ */
1040 9fa3e853 bellard
    /* XXX: see if in some cases it could be faster to invalidate all the code */
1041 9fa3e853 bellard
    tb = p->first_tb;
1042 9fa3e853 bellard
    while (tb != NULL) {
1043 9fa3e853 bellard
        n = (long)tb & 3;
1044 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1045 9fa3e853 bellard
        tb_next = tb->page_next[n];
1046 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
1047 9fa3e853 bellard
        if (n == 0) {
1048 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
1049 9fa3e853 bellard
               it is not a problem */
1050 9fa3e853 bellard
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1051 9fa3e853 bellard
            tb_end = tb_start + tb->size;
1052 9fa3e853 bellard
        } else {
1053 9fa3e853 bellard
            tb_start = tb->page_addr[1];
1054 9fa3e853 bellard
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1055 9fa3e853 bellard
        }
1056 9fa3e853 bellard
        if (!(tb_end <= start || tb_start >= end)) {
1057 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1058 d720b93d bellard
            if (current_tb_not_found) {
1059 d720b93d bellard
                current_tb_not_found = 0;
1060 d720b93d bellard
                current_tb = NULL;
1061 2e70f6ef pbrook
                if (env->mem_io_pc) {
1062 d720b93d bellard
                    /* now we have a real cpu fault */
1063 2e70f6ef pbrook
                    current_tb = tb_find_pc(env->mem_io_pc);
1064 d720b93d bellard
                }
1065 d720b93d bellard
            }
1066 d720b93d bellard
            if (current_tb == tb &&
1067 2e70f6ef pbrook
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
1068 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1069 d720b93d bellard
                its execution. We could be more precise by checking
1070 d720b93d bellard
                that the modification is after the current PC, but it
1071 d720b93d bellard
                would require a specialized function to partially
1072 d720b93d bellard
                restore the CPU state */
1073 3b46e624 ths
1074 d720b93d bellard
                current_tb_modified = 1;
1075 618ba8e6 Stefan Weil
                cpu_restore_state(current_tb, env, env->mem_io_pc);
1076 6b917547 aliguori
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1077 6b917547 aliguori
                                     &current_flags);
1078 d720b93d bellard
            }
1079 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1080 6f5a9f7e bellard
            /* we need to do that to handle the case where a signal
1081 6f5a9f7e bellard
               occurs while doing tb_phys_invalidate() */
1082 6f5a9f7e bellard
            saved_tb = NULL;
1083 6f5a9f7e bellard
            if (env) {
1084 6f5a9f7e bellard
                saved_tb = env->current_tb;
1085 6f5a9f7e bellard
                env->current_tb = NULL;
1086 6f5a9f7e bellard
            }
1087 9fa3e853 bellard
            tb_phys_invalidate(tb, -1);
1088 6f5a9f7e bellard
            if (env) {
1089 6f5a9f7e bellard
                env->current_tb = saved_tb;
1090 6f5a9f7e bellard
                if (env->interrupt_request && env->current_tb)
1091 6f5a9f7e bellard
                    cpu_interrupt(env, env->interrupt_request);
1092 6f5a9f7e bellard
            }
1093 9fa3e853 bellard
        }
1094 9fa3e853 bellard
        tb = tb_next;
1095 9fa3e853 bellard
    }
1096 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
1097 9fa3e853 bellard
    /* if no code remaining, no need to continue to use slow writes */
1098 9fa3e853 bellard
    if (!p->first_tb) {
1099 9fa3e853 bellard
        invalidate_page_bitmap(p);
1100 d720b93d bellard
        if (is_cpu_write_access) {
1101 2e70f6ef pbrook
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1102 d720b93d bellard
        }
1103 d720b93d bellard
    }
1104 d720b93d bellard
#endif
1105 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1106 d720b93d bellard
    if (current_tb_modified) {
1107 d720b93d bellard
        /* we generate a block containing just the instruction
1108 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1109 d720b93d bellard
           itself */
1110 ea1c1802 bellard
        env->current_tb = NULL;
1111 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1112 d720b93d bellard
        cpu_resume_from_signal(env, NULL);
1113 9fa3e853 bellard
    }
1114 fd6ce8f6 bellard
#endif
1115 9fa3e853 bellard
}
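/* illustrative usage (a sketch, not original text): a debugger write to a
   single guest byte can be handled as

       tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);

   with is_cpu_write_access == 0 because the store does not come from the
   executing CPU; breakpoint_invalidate() below uses exactly this pattern. */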
1116 fd6ce8f6 bellard
1117 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1118 41c1b1c9 Paul Brook
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1119 9fa3e853 bellard
{
1120 9fa3e853 bellard
    PageDesc *p;
1121 9fa3e853 bellard
    int offset, b;
1122 59817ccb bellard
#if 0
1123 a4193c8a bellard
    if (1) {
1124 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1125 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1126 93fcfe39 aliguori
                  cpu_single_env->eip,
1127 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1128 59817ccb bellard
    }
1129 59817ccb bellard
#endif
1130 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1131 5fafdf24 ths
    if (!p)
1132 9fa3e853 bellard
        return;
1133 9fa3e853 bellard
    if (p->code_bitmap) {
1134 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1135 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1136 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1137 9fa3e853 bellard
            goto do_invalidate;
1138 9fa3e853 bellard
    } else {
1139 9fa3e853 bellard
    do_invalidate:
1140 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1141 9fa3e853 bellard
    }
1142 9fa3e853 bellard
}
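/* worked example (illustrative, assuming 4 KB target pages): for an aligned
   4-byte write at page offset 0x124,

       offset >> 3     == 0x24  (byte index into code_bitmap)
       offset & 7      == 4     (first bit within that byte)
       (1 << len) - 1  == 0xf   (covers offsets 0x124..0x127)

   so the slow invalidation path runs only if translated code overlaps
   those four bytes. */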
1143 9fa3e853 bellard
1144 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1145 41c1b1c9 Paul Brook
static void tb_invalidate_phys_page(tb_page_addr_t addr,
1146 d720b93d bellard
                                    unsigned long pc, void *puc)
1147 9fa3e853 bellard
{
1148 6b917547 aliguori
    TranslationBlock *tb;
1149 9fa3e853 bellard
    PageDesc *p;
1150 6b917547 aliguori
    int n;
1151 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1152 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1153 d720b93d bellard
    CPUState *env = cpu_single_env;
1154 6b917547 aliguori
    int current_tb_modified = 0;
1155 6b917547 aliguori
    target_ulong current_pc = 0;
1156 6b917547 aliguori
    target_ulong current_cs_base = 0;
1157 6b917547 aliguori
    int current_flags = 0;
1158 d720b93d bellard
#endif
1159 9fa3e853 bellard
1160 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1161 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1162 5fafdf24 ths
    if (!p)
1163 9fa3e853 bellard
        return;
1164 9fa3e853 bellard
    tb = p->first_tb;
1165 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1166 d720b93d bellard
    if (tb && pc != 0) {
1167 d720b93d bellard
        current_tb = tb_find_pc(pc);
1168 d720b93d bellard
    }
1169 d720b93d bellard
#endif
1170 9fa3e853 bellard
    while (tb != NULL) {
1171 9fa3e853 bellard
        n = (long)tb & 3;
1172 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1173 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1174 d720b93d bellard
        if (current_tb == tb &&
1175 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1176 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1177 d720b93d bellard
                   its execution. We could be more precise by checking
1178 d720b93d bellard
                   that the modification is after the current PC, but it
1179 d720b93d bellard
                   would require a specialized function to partially
1180 d720b93d bellard
                   restore the CPU state */
1181 3b46e624 ths
1182 d720b93d bellard
            current_tb_modified = 1;
1183 618ba8e6 Stefan Weil
            cpu_restore_state(current_tb, env, pc);
1184 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1185 6b917547 aliguori
                                 &current_flags);
1186 d720b93d bellard
        }
1187 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1188 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1189 9fa3e853 bellard
        tb = tb->page_next[n];
1190 9fa3e853 bellard
    }
1191 fd6ce8f6 bellard
    p->first_tb = NULL;
1192 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1193 d720b93d bellard
    if (current_tb_modified) {
1194 d720b93d bellard
        /* we generate a block containing just the instruction
1195 d720b93d bellard
           modifying the memory. It will ensure that it cannot modify
1196 d720b93d bellard
           itself */
1197 ea1c1802 bellard
        env->current_tb = NULL;
1198 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1199 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1200 d720b93d bellard
    }
1201 d720b93d bellard
#endif
1202 fd6ce8f6 bellard
}
1203 9fa3e853 bellard
#endif
1204 fd6ce8f6 bellard
1205 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
1206 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1207 41c1b1c9 Paul Brook
                                 unsigned int n, tb_page_addr_t page_addr)
1208 fd6ce8f6 bellard
{
1209 fd6ce8f6 bellard
    PageDesc *p;
1210 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1211 9fa3e853 bellard
1212 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1213 5cd2c5b6 Richard Henderson
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1214 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1215 9fa3e853 bellard
    last_first_tb = p->first_tb;
1216 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1217 9fa3e853 bellard
    invalidate_page_bitmap(p);
1218 fd6ce8f6 bellard
1219 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1220 d720b93d bellard
1221 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1222 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1223 53a5960a pbrook
        target_ulong addr;
1224 53a5960a pbrook
        PageDesc *p2;
1225 9fa3e853 bellard
        int prot;
1226 9fa3e853 bellard
1227 fd6ce8f6 bellard
        /* force the host page as non writable (writes will have a
1228 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1229 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1230 fd6ce8f6 bellard
        prot = 0;
1231 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1232 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1233 53a5960a pbrook
1234 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1235 53a5960a pbrook
            if (!p2)
1236 53a5960a pbrook
                continue;
1237 53a5960a pbrook
            prot |= p2->flags;
1238 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1239 53a5960a pbrook
        }
1240 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1241 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1242 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1243 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1244 53a5960a pbrook
               page_addr);
1245 fd6ce8f6 bellard
#endif
1246 fd6ce8f6 bellard
    }
1247 9fa3e853 bellard
#else
1248 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1249 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1250 9fa3e853 bellard
       allocated in a physical page */
1251 9fa3e853 bellard
    if (!last_first_tb) {
1252 6a00d601 bellard
        tlb_protect_code(page_addr);
1253 9fa3e853 bellard
    }
1254 9fa3e853 bellard
#endif
1255 d720b93d bellard
1256 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1257 fd6ce8f6 bellard
}
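/* note (an interpretation, not original text): in user mode the protection
   granularity is the host page, so with 4 KB target pages on a 64 KB-page
   host the single mprotect() above write-protects 16 target pages at once;
   a later guest store to any of them is expected to fault, and the signal
   handler then unprotects the page and calls tb_invalidate_phys_page(). */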
1258 fd6ce8f6 bellard
1259 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1260 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1261 41c1b1c9 Paul Brook
void tb_link_page(TranslationBlock *tb,
1262 41c1b1c9 Paul Brook
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1263 d4e8164f bellard
{
1264 9fa3e853 bellard
    unsigned int h;
1265 9fa3e853 bellard
    TranslationBlock **ptb;
1266 9fa3e853 bellard
1267 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1268 c8a706fe pbrook
       before we are done.  */
1269 c8a706fe pbrook
    mmap_lock();
1270 9fa3e853 bellard
    /* add in the physical hash table */
1271 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1272 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1273 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1274 9fa3e853 bellard
    *ptb = tb;
1275 fd6ce8f6 bellard
1276 fd6ce8f6 bellard
    /* add in the page list */
1277 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1278 9fa3e853 bellard
    if (phys_page2 != -1)
1279 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1280 9fa3e853 bellard
    else
1281 9fa3e853 bellard
        tb->page_addr[1] = -1;
1282 9fa3e853 bellard
1283 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1284 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1285 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1286 d4e8164f bellard
1287 d4e8164f bellard
    /* init original jump addresses */
1288 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1289 d4e8164f bellard
        tb_reset_jump(tb, 0);
1290 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1291 d4e8164f bellard
        tb_reset_jump(tb, 1);
1292 8a40a180 bellard
1293 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1294 8a40a180 bellard
    tb_page_check();
1295 8a40a180 bellard
#endif
1296 c8a706fe pbrook
    mmap_unlock();
1297 fd6ce8f6 bellard
}
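/* example (illustrative, assuming 4 KB pages): a TB whose code starts at
   guest pc 0xffc with size 0x10 spills into the next page, so the caller
   is expected to pass phys_page2 = the physical page of (pc + size - 1)
   and the TB is entered into both pages' lists; a TB contained in a
   single page passes phys_page2 == -1. */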
1298 fd6ce8f6 bellard
1299 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1300 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1301 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1302 fd6ce8f6 bellard
{
1303 9fa3e853 bellard
    int m_min, m_max, m;
1304 9fa3e853 bellard
    unsigned long v;
1305 9fa3e853 bellard
    TranslationBlock *tb;
1306 a513fe19 bellard
1307 a513fe19 bellard
    if (nb_tbs <= 0)
1308 a513fe19 bellard
        return NULL;
1309 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1310 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1311 a513fe19 bellard
        return NULL;
1312 a513fe19 bellard
    /* binary search (cf Knuth) */
1313 a513fe19 bellard
    m_min = 0;
1314 a513fe19 bellard
    m_max = nb_tbs - 1;
1315 a513fe19 bellard
    while (m_min <= m_max) {
1316 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1317 a513fe19 bellard
        tb = &tbs[m];
1318 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1319 a513fe19 bellard
        if (v == tc_ptr)
1320 a513fe19 bellard
            return tb;
1321 a513fe19 bellard
        else if (tc_ptr < v) {
1322 a513fe19 bellard
            m_max = m - 1;
1323 a513fe19 bellard
        } else {
1324 a513fe19 bellard
            m_min = m + 1;
1325 a513fe19 bellard
        }
1326 5fafdf24 ths
    }
1327 a513fe19 bellard
    return &tbs[m_max];
1328 a513fe19 bellard
}
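/* illustrative usage (a sketch): given the host pc taken from a signal
   handler's context, the enclosing TB is recovered with

       TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);

   where host_pc is a made-up name; a NULL result means the fault did not
   happen inside generated code (see tb_invalidate_phys_page() above). */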
1329 7501267e bellard
1330 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1331 ea041c0e bellard
1332 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1333 ea041c0e bellard
{
1334 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1335 ea041c0e bellard
    unsigned int n1;
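    /* note: the jmp lists use tagged pointers; the low two bits of each
       link select which of the pointing TB's two outgoing jumps is meant
       (0 or 1), and the value 2 marks the list head, i.e. the jump
       target itself (see tb_link_page() above) */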
1336 ea041c0e bellard
1337 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1338 ea041c0e bellard
    if (tb1 != NULL) {
1339 ea041c0e bellard
        /* find head of list */
1340 ea041c0e bellard
        for(;;) {
1341 ea041c0e bellard
            n1 = (long)tb1 & 3;
1342 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1343 ea041c0e bellard
            if (n1 == 2)
1344 ea041c0e bellard
                break;
1345 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1346 ea041c0e bellard
        }
1347 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1348 ea041c0e bellard
        tb_next = tb1;
1349 ea041c0e bellard
1350 ea041c0e bellard
        /* remove tb from the jmp_first list */
1351 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1352 ea041c0e bellard
        for(;;) {
1353 ea041c0e bellard
            tb1 = *ptb;
1354 ea041c0e bellard
            n1 = (long)tb1 & 3;
1355 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1356 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1357 ea041c0e bellard
                break;
1358 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1359 ea041c0e bellard
        }
1360 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1361 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1362 3b46e624 ths
1363 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1364 ea041c0e bellard
        tb_reset_jump(tb, n);
1365 ea041c0e bellard
1366 0124311e bellard
        /* suppress jumps in the tb to which we could have jumped */
1367 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1368 ea041c0e bellard
    }
1369 ea041c0e bellard
}
1370 ea041c0e bellard
1371 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1372 ea041c0e bellard
{
1373 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1374 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1375 ea041c0e bellard
}
1376 ea041c0e bellard
1377 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1378 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1379 94df27fd Paul Brook
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1380 94df27fd Paul Brook
{
1381 94df27fd Paul Brook
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1382 94df27fd Paul Brook
}
1383 94df27fd Paul Brook
#else
1384 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1385 d720b93d bellard
{
1386 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1387 9b3c35e0 j_mayer
    target_ulong pd;
1388 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1389 c2f07f81 pbrook
    PhysPageDesc *p;
1390 d720b93d bellard
1391 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1392 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1393 c2f07f81 pbrook
    if (!p) {
1394 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1395 c2f07f81 pbrook
    } else {
1396 c2f07f81 pbrook
        pd = p->phys_offset;
1397 c2f07f81 pbrook
    }
1398 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1399 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1400 d720b93d bellard
}
1401 c27004ec bellard
#endif
1402 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1403 d720b93d bellard
1404 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
1405 c527ee8f Paul Brook
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1406 c527ee8f Paul Brook
1407 c527ee8f Paul Brook
{
1408 c527ee8f Paul Brook
}
1409 c527ee8f Paul Brook
1410 c527ee8f Paul Brook
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1411 c527ee8f Paul Brook
                          int flags, CPUWatchpoint **watchpoint)
1412 c527ee8f Paul Brook
{
1413 c527ee8f Paul Brook
    return -ENOSYS;
1414 c527ee8f Paul Brook
}
1415 c527ee8f Paul Brook
#else
1416 6658ffb8 pbrook
/* Add a watchpoint.  */
1417 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1418 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1419 6658ffb8 pbrook
{
1420 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1421 c0ce998e aliguori
    CPUWatchpoint *wp;
1422 6658ffb8 pbrook
1423 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1424 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1425 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1426 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1427 b4051334 aliguori
        return -EINVAL;
1428 b4051334 aliguori
    }
1429 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1430 a1d1bb31 aliguori
1431 a1d1bb31 aliguori
    wp->vaddr = addr;
1432 b4051334 aliguori
    wp->len_mask = len_mask;
1433 a1d1bb31 aliguori
    wp->flags = flags;
1434 a1d1bb31 aliguori
1435 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1436 c0ce998e aliguori
    if (flags & BP_GDB)
1437 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1438 c0ce998e aliguori
    else
1439 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1440 6658ffb8 pbrook
1441 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1442 a1d1bb31 aliguori
1443 a1d1bb31 aliguori
    if (watchpoint)
1444 a1d1bb31 aliguori
        *watchpoint = wp;
1445 a1d1bb31 aliguori
    return 0;
1446 6658ffb8 pbrook
}
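/* illustrative usage (a sketch; env and the address are assumptions):

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, 0x2000, 4, BP_GDB, &wp) < 0) {
           // rejected: len must be 1, 2, 4 or 8 and addr len-aligned
       }

   GDB-injected watchpoints are kept at the head of the list so they are
   matched first. */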
1447 6658ffb8 pbrook
1448 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1449 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1450 a1d1bb31 aliguori
                          int flags)
1451 6658ffb8 pbrook
{
1452 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1453 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1454 6658ffb8 pbrook
1455 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1456 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1457 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1458 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1459 6658ffb8 pbrook
            return 0;
1460 6658ffb8 pbrook
        }
1461 6658ffb8 pbrook
    }
1462 a1d1bb31 aliguori
    return -ENOENT;
1463 6658ffb8 pbrook
}
1464 6658ffb8 pbrook
1465 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1466 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1467 a1d1bb31 aliguori
{
1468 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1469 7d03f82f edgar_igl
1470 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1471 a1d1bb31 aliguori
1472 a1d1bb31 aliguori
    qemu_free(watchpoint);
1473 a1d1bb31 aliguori
}
1474 a1d1bb31 aliguori
1475 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1476 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1477 a1d1bb31 aliguori
{
1478 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1479 a1d1bb31 aliguori
1480 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1481 a1d1bb31 aliguori
        if (wp->flags & mask)
1482 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1483 c0ce998e aliguori
    }
1484 7d03f82f edgar_igl
}
1485 c527ee8f Paul Brook
#endif
1486 7d03f82f edgar_igl
1487 a1d1bb31 aliguori
/* Add a breakpoint.  */
1488 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1489 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1490 4c3a88a2 bellard
{
1491 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1492 c0ce998e aliguori
    CPUBreakpoint *bp;
1493 3b46e624 ths
1494 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1495 4c3a88a2 bellard
1496 a1d1bb31 aliguori
    bp->pc = pc;
1497 a1d1bb31 aliguori
    bp->flags = flags;
1498 a1d1bb31 aliguori
1499 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1500 c0ce998e aliguori
    if (flags & BP_GDB)
1501 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1502 c0ce998e aliguori
    else
1503 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1504 3b46e624 ths
1505 d720b93d bellard
    breakpoint_invalidate(env, pc);
1506 a1d1bb31 aliguori
1507 a1d1bb31 aliguori
    if (breakpoint)
1508 a1d1bb31 aliguori
        *breakpoint = bp;
1509 4c3a88a2 bellard
    return 0;
1510 4c3a88a2 bellard
#else
1511 a1d1bb31 aliguori
    return -ENOSYS;
1512 4c3a88a2 bellard
#endif
1513 4c3a88a2 bellard
}
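/* illustrative usage (a sketch; env and pc are assumptions):

       CPUBreakpoint *bp;
       if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
           ...
           cpu_breakpoint_remove_by_ref(env, bp);
       }

   on targets built without TARGET_HAS_ICE the call simply returns
   -ENOSYS. */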
1514 4c3a88a2 bellard
1515 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1516 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1517 a1d1bb31 aliguori
{
1518 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1519 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1520 a1d1bb31 aliguori
1521 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1522 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1523 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1524 a1d1bb31 aliguori
            return 0;
1525 a1d1bb31 aliguori
        }
1526 7d03f82f edgar_igl
    }
1527 a1d1bb31 aliguori
    return -ENOENT;
1528 a1d1bb31 aliguori
#else
1529 a1d1bb31 aliguori
    return -ENOSYS;
1530 7d03f82f edgar_igl
#endif
1531 7d03f82f edgar_igl
}
1532 7d03f82f edgar_igl
1533 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1534 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1535 4c3a88a2 bellard
{
1536 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1537 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1538 d720b93d bellard
1539 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1540 a1d1bb31 aliguori
1541 a1d1bb31 aliguori
    qemu_free(breakpoint);
1542 a1d1bb31 aliguori
#endif
1543 a1d1bb31 aliguori
}
1544 a1d1bb31 aliguori
1545 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1546 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1547 a1d1bb31 aliguori
{
1548 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1549 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1550 a1d1bb31 aliguori
1551 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1552 a1d1bb31 aliguori
        if (bp->flags & mask)
1553 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1554 c0ce998e aliguori
    }
1555 4c3a88a2 bellard
#endif
1556 4c3a88a2 bellard
}
1557 4c3a88a2 bellard
1558 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1559 c33a346e bellard
   CPU loop after each instruction */
1560 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1561 c33a346e bellard
{
1562 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1563 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1564 c33a346e bellard
        env->singlestep_enabled = enabled;
1565 e22a25c9 aliguori
        if (kvm_enabled())
1566 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1567 e22a25c9 aliguori
        else {
1568 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1569 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1570 e22a25c9 aliguori
            tb_flush(env);
1571 e22a25c9 aliguori
        }
1572 c33a346e bellard
    }
1573 c33a346e bellard
#endif
1574 c33a346e bellard
}
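/* illustrative usage (a sketch): a debugger stub enables single-stepping
   before resuming the guest, e.g.

       cpu_single_step(env, 1);

   under TCG this flushes all translated code, since already-generated TBs
   do not test singlestep_enabled. */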
1575 c33a346e bellard
1576 34865134 bellard
/* enable or disable low levels log */
1577 34865134 bellard
void cpu_set_log(int log_flags)
1578 34865134 bellard
{
1579 34865134 bellard
    loglevel = log_flags;
1580 34865134 bellard
    if (loglevel && !logfile) {
1581 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1582 34865134 bellard
        if (!logfile) {
1583 34865134 bellard
            perror(logfilename);
1584 34865134 bellard
            _exit(1);
1585 34865134 bellard
        }
1586 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1587 9fa3e853 bellard
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1588 9fa3e853 bellard
        {
1589 b55266b5 blueswir1
            static char logfile_buf[4096];
1590 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1591 9fa3e853 bellard
        }
1592 bf65f53f Filip Navara
#elif !defined(_WIN32)
1593 bf65f53f Filip Navara
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1594 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1595 9fa3e853 bellard
#endif
1596 e735b91c pbrook
        log_append = 1;
1597 e735b91c pbrook
    }
1598 e735b91c pbrook
    if (!loglevel && logfile) {
1599 e735b91c pbrook
        fclose(logfile);
1600 e735b91c pbrook
        logfile = NULL;
1601 34865134 bellard
    }
1602 34865134 bellard
}
1603 34865134 bellard
1604 34865134 bellard
void cpu_set_log_filename(const char *filename)
1605 34865134 bellard
{
1606 34865134 bellard
    logfilename = strdup(filename);
1607 e735b91c pbrook
    if (logfile) {
1608 e735b91c pbrook
        fclose(logfile);
1609 e735b91c pbrook
        logfile = NULL;
1610 e735b91c pbrook
    }
1611 e735b91c pbrook
    cpu_set_log(loglevel);
1612 34865134 bellard
}
1613 c33a346e bellard
1614 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1615 ea041c0e bellard
{
1616 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1617 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1618 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds.  Often
1619 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
1620 ea041c0e bellard
    TranslationBlock *tb;
1621 c227f099 Anthony Liguori
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1622 59817ccb bellard
1623 cab1b4bd Riku Voipio
    spin_lock(&interrupt_lock);
1624 3098dba0 aurel32
    tb = env->current_tb;
1625 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1626 3098dba0 aurel32
       all the potentially executing TB */
1627 f76cfe56 Riku Voipio
    if (tb) {
1628 3098dba0 aurel32
        env->current_tb = NULL;
1629 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1630 be214e6c aurel32
    }
1631 cab1b4bd Riku Voipio
    spin_unlock(&interrupt_lock);
1632 3098dba0 aurel32
}
1633 3098dba0 aurel32
1634 97ffbd8d Jan Kiszka
#ifndef CONFIG_USER_ONLY
1635 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1636 ec6959d0 Jan Kiszka
static void tcg_handle_interrupt(CPUState *env, int mask)
1637 3098dba0 aurel32
{
1638 3098dba0 aurel32
    int old_mask;
1639 be214e6c aurel32
1640 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1641 68a79315 bellard
    env->interrupt_request |= mask;
1642 3098dba0 aurel32
1643 8edac960 aliguori
    /*
1644 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1645 8edac960 aliguori
     * case it's halted.
1646 8edac960 aliguori
     */
1647 b7680cb6 Jan Kiszka
    if (!qemu_cpu_is_self(env)) {
1648 8edac960 aliguori
        qemu_cpu_kick(env);
1649 8edac960 aliguori
        return;
1650 8edac960 aliguori
    }
1651 8edac960 aliguori
1652 2e70f6ef pbrook
    if (use_icount) {
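        /* a reading of the mechanism (not original text): forcing the
           high half of icount_decr makes the 32-bit counter negative, so
           the currently executing TB stops at its next icount check */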
1653 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1654 2e70f6ef pbrook
        if (!can_do_io(env)
1655 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1656 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1657 2e70f6ef pbrook
        }
1658 2e70f6ef pbrook
    } else {
1659 3098dba0 aurel32
        cpu_unlink_tb(env);
1660 ea041c0e bellard
    }
1661 ea041c0e bellard
}
1662 ea041c0e bellard
1663 ec6959d0 Jan Kiszka
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1664 ec6959d0 Jan Kiszka
1665 97ffbd8d Jan Kiszka
#else /* CONFIG_USER_ONLY */
1666 97ffbd8d Jan Kiszka
1667 97ffbd8d Jan Kiszka
void cpu_interrupt(CPUState *env, int mask)
1668 97ffbd8d Jan Kiszka
{
1669 97ffbd8d Jan Kiszka
    env->interrupt_request |= mask;
1670 97ffbd8d Jan Kiszka
    cpu_unlink_tb(env);
1671 97ffbd8d Jan Kiszka
}
1672 97ffbd8d Jan Kiszka
#endif /* CONFIG_USER_ONLY */
1673 97ffbd8d Jan Kiszka
1674 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1675 b54ad049 bellard
{
1676 b54ad049 bellard
    env->interrupt_request &= ~mask;
1677 b54ad049 bellard
}
1678 b54ad049 bellard
1679 3098dba0 aurel32
void cpu_exit(CPUState *env)
1680 3098dba0 aurel32
{
1681 3098dba0 aurel32
    env->exit_request = 1;
1682 3098dba0 aurel32
    cpu_unlink_tb(env);
1683 3098dba0 aurel32
}
1684 3098dba0 aurel32
1685 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1686 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1687 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1688 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1689 f193c797 bellard
      "show target assembly code for each compiled TB" },
1690 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1691 57fec1fe bellard
      "show micro ops for each compiled TB" },
1692 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1693 e01a1157 blueswir1
      "show micro ops "
1694 e01a1157 blueswir1
#ifdef TARGET_I386
1695 e01a1157 blueswir1
      "before eflags optimization and "
1696 f193c797 bellard
#endif
1697 e01a1157 blueswir1
      "after liveness analysis" },
1698 f193c797 bellard
    { CPU_LOG_INT, "int",
1699 f193c797 bellard
      "show interrupts/exceptions in short format" },
1700 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1701 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1702 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1703 e91c8a77 ths
      "show CPU state before block translation" },
1704 f193c797 bellard
#ifdef TARGET_I386
1705 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1706 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1707 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1708 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1709 f193c797 bellard
#endif
1710 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1711 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1712 fd872598 bellard
      "show all i/o ports accesses" },
1713 8e3a9fd2 bellard
#endif
1714 f193c797 bellard
    { 0, NULL, NULL },
1715 f193c797 bellard
};
1716 f193c797 bellard
1717 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1718 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1719 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1720 f6f3fbca Michael S. Tsirkin
1721 f6f3fbca Michael S. Tsirkin
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1722 9742bf26 Yoshiaki Tamura
                                  ram_addr_t size,
1723 0fd542fb Michael S. Tsirkin
                                  ram_addr_t phys_offset,
1724 0fd542fb Michael S. Tsirkin
                                  bool log_dirty)
1725 f6f3fbca Michael S. Tsirkin
{
1726 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1727 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1728 0fd542fb Michael S. Tsirkin
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
1729 f6f3fbca Michael S. Tsirkin
    }
1730 f6f3fbca Michael S. Tsirkin
}
1731 f6f3fbca Michael S. Tsirkin
1732 f6f3fbca Michael S. Tsirkin
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1733 9742bf26 Yoshiaki Tamura
                                        target_phys_addr_t end)
1734 f6f3fbca Michael S. Tsirkin
{
1735 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1736 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1737 f6f3fbca Michael S. Tsirkin
        int r = client->sync_dirty_bitmap(client, start, end);
1738 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1739 f6f3fbca Michael S. Tsirkin
            return r;
1740 f6f3fbca Michael S. Tsirkin
    }
1741 f6f3fbca Michael S. Tsirkin
    return 0;
1742 f6f3fbca Michael S. Tsirkin
}
1743 f6f3fbca Michael S. Tsirkin
1744 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1745 f6f3fbca Michael S. Tsirkin
{
1746 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1747 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1748 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1749 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1750 f6f3fbca Michael S. Tsirkin
            return r;
1751 f6f3fbca Michael S. Tsirkin
    }
1752 f6f3fbca Michael S. Tsirkin
    return 0;
1753 f6f3fbca Michael S. Tsirkin
}
1754 f6f3fbca Michael S. Tsirkin
1755 8d4c78e7 Alex Williamson
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1756 8d4c78e7 Alex Williamson
 * address.  Each intermediate table provides the next L2_BITs of guest
1757 8d4c78e7 Alex Williamson
 * physical address space.  The number of levels vary based on host and
1758 8d4c78e7 Alex Williamson
 * guest configuration, making it efficient to build the final guest
1759 8d4c78e7 Alex Williamson
 * physical address by seeding the L1 offset and shifting and adding in
1760 8d4c78e7 Alex Williamson
 * each L2 offset as we recurse through them. */
1761 5cd2c5b6 Richard Henderson
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1762 8d4c78e7 Alex Williamson
                                 int level, void **lp, target_phys_addr_t addr)
1763 f6f3fbca Michael S. Tsirkin
{
1764 5cd2c5b6 Richard Henderson
    int i;
1765 f6f3fbca Michael S. Tsirkin
1766 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
1767 5cd2c5b6 Richard Henderson
        return;
1768 5cd2c5b6 Richard Henderson
    }
1769 5cd2c5b6 Richard Henderson
    if (level == 0) {
1770 5cd2c5b6 Richard Henderson
        PhysPageDesc *pd = *lp;
1771 8d4c78e7 Alex Williamson
        addr <<= L2_BITS + TARGET_PAGE_BITS;
1772 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
1773 5cd2c5b6 Richard Henderson
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1774 8d4c78e7 Alex Williamson
                client->set_memory(client, addr | i << TARGET_PAGE_BITS,
1775 0fd542fb Michael S. Tsirkin
                                   TARGET_PAGE_SIZE, pd[i].phys_offset, false);
1776 f6f3fbca Michael S. Tsirkin
            }
1777 5cd2c5b6 Richard Henderson
        }
1778 5cd2c5b6 Richard Henderson
    } else {
1779 5cd2c5b6 Richard Henderson
        void **pp = *lp;
1780 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
1781 8d4c78e7 Alex Williamson
            phys_page_for_each_1(client, level - 1, pp + i,
1782 8d4c78e7 Alex Williamson
                                 (addr << L2_BITS) | i);
1783 f6f3fbca Michael S. Tsirkin
        }
1784 f6f3fbca Michael S. Tsirkin
    }
1785 f6f3fbca Michael S. Tsirkin
}
1786 f6f3fbca Michael S. Tsirkin
1787 f6f3fbca Michael S. Tsirkin
static void phys_page_for_each(CPUPhysMemoryClient *client)
1788 f6f3fbca Michael S. Tsirkin
{
1789 5cd2c5b6 Richard Henderson
    int i;
1790 5cd2c5b6 Richard Henderson
    for (i = 0; i < P_L1_SIZE; ++i) {
1791 5cd2c5b6 Richard Henderson
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1792 8d4c78e7 Alex Williamson
                             l1_phys_map + i, i);
1793 f6f3fbca Michael S. Tsirkin
    }
1794 f6f3fbca Michael S. Tsirkin
}
1795 f6f3fbca Michael S. Tsirkin
1796 f6f3fbca Michael S. Tsirkin
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1797 f6f3fbca Michael S. Tsirkin
{
1798 f6f3fbca Michael S. Tsirkin
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
1799 f6f3fbca Michael S. Tsirkin
    phys_page_for_each(client);
1800 f6f3fbca Michael S. Tsirkin
}
1801 f6f3fbca Michael S. Tsirkin
1802 f6f3fbca Michael S. Tsirkin
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1803 f6f3fbca Michael S. Tsirkin
{
1804 f6f3fbca Michael S. Tsirkin
    QLIST_REMOVE(client, list);
1805 f6f3fbca Michael S. Tsirkin
}
1806 f6f3fbca Michael S. Tsirkin
#endif
1807 f6f3fbca Michael S. Tsirkin
1808 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1809 f193c797 bellard
{
1810 f193c797 bellard
    if (strlen(s2) != n)
1811 f193c797 bellard
        return 0;
1812 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1813 f193c797 bellard
}
1814 3b46e624 ths
1815 f193c797 bellard
/* takes a comma-separated list of log masks. Returns 0 on error. */
1816 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1817 f193c797 bellard
{
1818 c7cd6a37 blueswir1
    const CPULogItem *item;
1819 f193c797 bellard
    int mask;
1820 f193c797 bellard
    const char *p, *p1;
1821 f193c797 bellard
1822 f193c797 bellard
    p = str;
1823 f193c797 bellard
    mask = 0;
1824 f193c797 bellard
    for(;;) {
1825 f193c797 bellard
        p1 = strchr(p, ',');
1826 f193c797 bellard
        if (!p1)
1827 f193c797 bellard
            p1 = p + strlen(p);
1828 9742bf26 Yoshiaki Tamura
        if (cmp1(p, p1 - p, "all")) {
1829 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1830 9742bf26 Yoshiaki Tamura
                mask |= item->mask;
1831 9742bf26 Yoshiaki Tamura
            }
1832 9742bf26 Yoshiaki Tamura
        } else {
1833 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1834 9742bf26 Yoshiaki Tamura
                if (cmp1(p, p1 - p, item->name))
1835 9742bf26 Yoshiaki Tamura
                    goto found;
1836 9742bf26 Yoshiaki Tamura
            }
1837 9742bf26 Yoshiaki Tamura
            return 0;
1838 f193c797 bellard
        }
1839 f193c797 bellard
    found:
1840 f193c797 bellard
        mask |= item->mask;
1841 f193c797 bellard
        if (*p1 != ',')
1842 f193c797 bellard
            break;
1843 f193c797 bellard
        p = p1 + 1;
1844 f193c797 bellard
    }
1845 f193c797 bellard
    return mask;
1846 f193c797 bellard
}
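/* worked example (illustrative): cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" ORs in every cpu_log_items
   entry, and an unknown name makes the whole call return 0. A typical
   caller chains it with cpu_set_log():

       cpu_set_log(cpu_str_to_log_mask(optarg));

   where optarg stands for the user-supplied log string. */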
1847 ea041c0e bellard
1848 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1849 7501267e bellard
{
1850 7501267e bellard
    va_list ap;
1851 493ae1f0 pbrook
    va_list ap2;
1852 7501267e bellard
1853 7501267e bellard
    va_start(ap, fmt);
1854 493ae1f0 pbrook
    va_copy(ap2, ap);
1855 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1856 7501267e bellard
    vfprintf(stderr, fmt, ap);
1857 7501267e bellard
    fprintf(stderr, "\n");
1858 7501267e bellard
#ifdef TARGET_I386
1859 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1860 7fe48483 bellard
#else
1861 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1862 7501267e bellard
#endif
1863 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1864 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1865 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1866 93fcfe39 aliguori
        qemu_log("\n");
1867 f9373291 j_mayer
#ifdef TARGET_I386
1868 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1869 f9373291 j_mayer
#else
1870 93fcfe39 aliguori
        log_cpu_state(env, 0);
1871 f9373291 j_mayer
#endif
1872 31b1a7b4 aliguori
        qemu_log_flush();
1873 93fcfe39 aliguori
        qemu_log_close();
1874 924edcae balrog
    }
1875 493ae1f0 pbrook
    va_end(ap2);
1876 f9373291 j_mayer
    va_end(ap);
1877 fd052bf6 Riku Voipio
#if defined(CONFIG_USER_ONLY)
1878 fd052bf6 Riku Voipio
    {
1879 fd052bf6 Riku Voipio
        struct sigaction act;
1880 fd052bf6 Riku Voipio
        sigfillset(&act.sa_mask);
1881 fd052bf6 Riku Voipio
        act.sa_handler = SIG_DFL;
1882 fd052bf6 Riku Voipio
        sigaction(SIGABRT, &act, NULL);
1883 fd052bf6 Riku Voipio
    }
1884 fd052bf6 Riku Voipio
#endif
1885 7501267e bellard
    abort();
1886 7501267e bellard
}
1887 7501267e bellard
1888 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1889 c5be9f08 ths
{
1890 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1891 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1892 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1893 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1894 5a38f081 aliguori
    CPUBreakpoint *bp;
1895 5a38f081 aliguori
    CPUWatchpoint *wp;
1896 5a38f081 aliguori
#endif
1897 5a38f081 aliguori
1898 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1899 5a38f081 aliguori
1900 5a38f081 aliguori
    /* Preserve chaining and index. */
1901 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1902 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1903 5a38f081 aliguori
1904 5a38f081 aliguori
    /* Clone all break/watchpoints.
1905 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1906 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1907 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->breakpoints);
1908 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->watchpoints);
1909 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1910 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1911 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1912 5a38f081 aliguori
    }
1913 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1914 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1915 5a38f081 aliguori
                              wp->flags, NULL);
1916 5a38f081 aliguori
    }
1917 5a38f081 aliguori
#endif
1918 5a38f081 aliguori
1919 c5be9f08 ths
    return new_env;
1920 c5be9f08 ths
}
1921 c5be9f08 ths
1922 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1923 0124311e bellard
1924 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1925 5c751e99 edgar_igl
{
1926 5c751e99 edgar_igl
    unsigned int i;
1927 5c751e99 edgar_igl
1928 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1929 5c751e99 edgar_igl
       overlap the flushed page.  */
1930 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1931 5c751e99 edgar_igl
    memset(&env->tb_jmp_cache[i], 0,
1932 9742bf26 Yoshiaki Tamura
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1933 5c751e99 edgar_igl
1934 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1935 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1936 9742bf26 Yoshiaki Tamura
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1937 5c751e99 edgar_igl
}
1938 5c751e99 edgar_igl
1939 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1940 08738984 Igor Kovalenko
    .addr_read  = -1,
1941 08738984 Igor Kovalenko
    .addr_write = -1,
1942 08738984 Igor Kovalenko
    .addr_code  = -1,
1943 08738984 Igor Kovalenko
    .addend     = -1,
1944 08738984 Igor Kovalenko
};
1945 08738984 Igor Kovalenko
1946 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1947 ee8b7021 bellard
   implemented yet) */
1948 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1949 33417e70 bellard
{
1950 33417e70 bellard
    int i;
1951 0124311e bellard
1952 9fa3e853 bellard
#if defined(DEBUG_TLB)
1953 9fa3e853 bellard
    printf("tlb_flush:\n");
1954 9fa3e853 bellard
#endif
1955 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1956 0124311e bellard
       links while we are modifying them */
1957 0124311e bellard
    env->current_tb = NULL;
1958 0124311e bellard
1959 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1960 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1961 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1962 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1963 cfde4bd9 Isaku Yamahata
        }
1964 33417e70 bellard
    }
1965 9fa3e853 bellard
1966 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1967 9fa3e853 bellard
1968 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1969 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1970 e3db7226 bellard
    tlb_flush_count++;
1971 33417e70 bellard
}
1972 33417e70 bellard
1973 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1974 61382a50 bellard
{
1975 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1976 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1977 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1978 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1979 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1980 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1981 08738984 Igor Kovalenko
        *tlb_entry = s_cputlb_empty_entry;
1982 84b7b8e7 bellard
    }
1983 61382a50 bellard
}
1984 61382a50 bellard
1985 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
1986 33417e70 bellard
{
1987 8a40a180 bellard
    int i;
1988 cfde4bd9 Isaku Yamahata
    int mmu_idx;
1989 0124311e bellard
1990 9fa3e853 bellard
#if defined(DEBUG_TLB)
1991 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1992 9fa3e853 bellard
#endif
1993 d4c430a8 Paul Brook
    /* Check if we need to flush due to large pages.  */
1994 d4c430a8 Paul Brook
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1995 d4c430a8 Paul Brook
#if defined(DEBUG_TLB)
1996 d4c430a8 Paul Brook
        printf("tlb_flush_page: forced full flush ("
1997 d4c430a8 Paul Brook
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1998 d4c430a8 Paul Brook
               env->tlb_flush_addr, env->tlb_flush_mask);
1999 d4c430a8 Paul Brook
#endif
2000 d4c430a8 Paul Brook
        tlb_flush(env, 1);
2001 d4c430a8 Paul Brook
        return;
2002 d4c430a8 Paul Brook
    }
2003 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
2004 0124311e bellard
       links while we are modifying them */
2005 0124311e bellard
    env->current_tb = NULL;
2006 61382a50 bellard
2007 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
2008 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2009 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2010 cfde4bd9 Isaku Yamahata
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2011 0124311e bellard
2012 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
2013 9fa3e853 bellard
}
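/* worked example (illustrative, assuming 4 KB target pages and a 256-entry
   TLB): for addr == 0x00402345,

       addr &= TARGET_PAGE_MASK           ->  0x00402000
       (addr >> TARGET_PAGE_BITS) & 0xff  ->  0x02

   so only TLB slot 2 of each mmu mode is checked and, if it maps this page
   for read, write or code, reset to s_cputlb_empty_entry. */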
2014 9fa3e853 bellard
2015 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
2016 9fa3e853 bellard
   can be detected */
2017 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
2018 9fa3e853 bellard
{
2019 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
2020 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
2021 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
2022 9fa3e853 bellard
}
2023 9fa3e853 bellard
2024 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
2025 3a7d929e bellard
   tested for self-modifying code */
2026 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2027 3a7d929e bellard
                                    target_ulong vaddr)
2028 9fa3e853 bellard
{
2029 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2030 1ccde1cb bellard
}
2031 1ccde1cb bellard
2032 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2033 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
2034 1ccde1cb bellard
{
2035 1ccde1cb bellard
    unsigned long addr;
2036 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2037 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2038 1ccde1cb bellard
        if ((addr - start) < length) {
2039 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2040 1ccde1cb bellard
        }
2041 1ccde1cb bellard
    }
2042 1ccde1cb bellard
}
2043 1ccde1cb bellard
2044 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
2045 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2046 0a962c02 bellard
                                     int dirty_flags)
2047 1ccde1cb bellard
{
2048 1ccde1cb bellard
    CPUState *env;
2049 4f2ac237 bellard
    unsigned long length, start1;
2050 f7c11b53 Yoshiaki Tamura
    int i;
2051 1ccde1cb bellard
2052 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
2053 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
2054 1ccde1cb bellard
2055 1ccde1cb bellard
    length = end - start;
2056 1ccde1cb bellard
    if (length == 0)
2057 1ccde1cb bellard
        return;
2058 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2059 f23db169 bellard
2060 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
2061 1ccde1cb bellard
       when accessing the range */
2062 b2e0a138 Michael S. Tsirkin
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
2063 a57d23e4 Stefan Weil
    /* Check that we don't span multiple blocks - this breaks the
2064 5579c7f3 pbrook
       address comparisons below.  */
2065 b2e0a138 Michael S. Tsirkin
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2066 5579c7f3 pbrook
            != (end - 1) - start) {
2067 5579c7f3 pbrook
        abort();
2068 5579c7f3 pbrook
    }
2069 5579c7f3 pbrook
2070 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2071 cfde4bd9 Isaku Yamahata
        int mmu_idx;
2072 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2073 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
2074 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2075 cfde4bd9 Isaku Yamahata
                                      start1, length);
2076 cfde4bd9 Isaku Yamahata
        }
2077 6a00d601 bellard
    }
2078 1ccde1cb bellard
}
2079 1ccde1cb bellard
2080 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2081 74576198 aliguori
{
2082 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2083 74576198 aliguori
    in_migration = enable;
2084 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2085 f6f3fbca Michael S. Tsirkin
    return ret;
2086 74576198 aliguori
}
2087 74576198 aliguori
2088 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
2089 74576198 aliguori
{
2090 74576198 aliguori
    return in_migration;
2091 74576198 aliguori
}
2092 74576198 aliguori
2093 c227f099 Anthony Liguori
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2094 c227f099 Anthony Liguori
                                   target_phys_addr_t end_addr)
2095 2bec46dc aliguori
{
2096 7b8f3b78 Michael S. Tsirkin
    int ret;
2097 151f7749 Jan Kiszka
2098 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2099 151f7749 Jan Kiszka
    return ret;
2100 2bec46dc aliguori
}
2101 2bec46dc aliguori
2102 e5896b12 Anthony PERARD
int cpu_physical_log_start(target_phys_addr_t start_addr,
2103 e5896b12 Anthony PERARD
                           ram_addr_t size)
2104 e5896b12 Anthony PERARD
{
2105 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2106 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2107 e5896b12 Anthony PERARD
        if (client->log_start) {
2108 e5896b12 Anthony PERARD
            int r = client->log_start(client, start_addr, size);
2109 e5896b12 Anthony PERARD
            if (r < 0) {
2110 e5896b12 Anthony PERARD
                return r;
2111 e5896b12 Anthony PERARD
            }
2112 e5896b12 Anthony PERARD
        }
2113 e5896b12 Anthony PERARD
    }
2114 e5896b12 Anthony PERARD
    return 0;
2115 e5896b12 Anthony PERARD
}
2116 e5896b12 Anthony PERARD
2117 e5896b12 Anthony PERARD
int cpu_physical_log_stop(target_phys_addr_t start_addr,
2118 e5896b12 Anthony PERARD
                          ram_addr_t size)
2119 e5896b12 Anthony PERARD
{
2120 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2121 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2122 e5896b12 Anthony PERARD
        if (client->log_stop) {
2123 e5896b12 Anthony PERARD
            int r = client->log_stop(client, start_addr, size);
2124 e5896b12 Anthony PERARD
            if (r < 0) {
2125 e5896b12 Anthony PERARD
                return r;
2126 e5896b12 Anthony PERARD
            }
2127 e5896b12 Anthony PERARD
        }
2128 e5896b12 Anthony PERARD
    }
2129 e5896b12 Anthony PERARD
    return 0;
2130 e5896b12 Anthony PERARD
}
2131 e5896b12 Anthony PERARD
2132 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2133 3a7d929e bellard
{
2134 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
2135 5579c7f3 pbrook
    void *p;
2136 3a7d929e bellard
2137 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2138 5579c7f3 pbrook
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2139 5579c7f3 pbrook
            + tlb_entry->addend);
2140 e890261f Marcelo Tosatti
        ram_addr = qemu_ram_addr_from_host_nofail(p);
2141 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
2142 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
2143 3a7d929e bellard
        }
2144 3a7d929e bellard
    }
2145 3a7d929e bellard
}
2146 3a7d929e bellard
2147 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2148 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2149 3a7d929e bellard
{
2150 3a7d929e bellard
    int i;
2151 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2152 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2153 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2154 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2155 cfde4bd9 Isaku Yamahata
    }
2156 3a7d929e bellard
}
2157 3a7d929e bellard
2158 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2159 1ccde1cb bellard
{
2160 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2161 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
2162 1ccde1cb bellard
}
2163 1ccde1cb bellard
2164 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2165 0f459d16 pbrook
   so that it is no longer dirty */
2166 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2167 1ccde1cb bellard
{
2168 1ccde1cb bellard
    int i;
2169 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2170 1ccde1cb bellard
2171 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2172 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2173 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2174 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2175 9fa3e853 bellard
}
2176 9fa3e853 bellard
2177 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
2178 d4c430a8 Paul Brook
   large pages and trigger a full TLB flush if these are invalidated.  */
2179 d4c430a8 Paul Brook
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2180 d4c430a8 Paul Brook
                               target_ulong size)
2181 d4c430a8 Paul Brook
{
2182 d4c430a8 Paul Brook
    target_ulong mask = ~(size - 1);
2183 d4c430a8 Paul Brook
2184 d4c430a8 Paul Brook
    if (env->tlb_flush_addr == (target_ulong)-1) {
2185 d4c430a8 Paul Brook
        env->tlb_flush_addr = vaddr & mask;
2186 d4c430a8 Paul Brook
        env->tlb_flush_mask = mask;
2187 d4c430a8 Paul Brook
        return;
2188 d4c430a8 Paul Brook
    }
2189 d4c430a8 Paul Brook
    /* Extend the existing region to include the new page.
2190 d4c430a8 Paul Brook
       This is a compromise between unnecessary flushes and the cost
2191 d4c430a8 Paul Brook
       of maintaining a full variable size TLB.  */
2192 d4c430a8 Paul Brook
    mask &= env->tlb_flush_mask;
2193 d4c430a8 Paul Brook
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2194 d4c430a8 Paul Brook
        mask <<= 1;
2195 d4c430a8 Paul Brook
    }
2196 d4c430a8 Paul Brook
    env->tlb_flush_addr &= mask;
2197 d4c430a8 Paul Brook
    env->tlb_flush_mask = mask;
2198 d4c430a8 Paul Brook
}
2199 d4c430a8 Paul Brook
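/* Worked example (illustrative values, TARGET_PAGE_BITS == 12): after a
   2MB page at 0x40200000 the tracked region is addr = 0x40200000,
   mask = 0xffe00000.  A second 2MB page at 0x40600000 shifts the mask
   left twice, until ((0x40200000 ^ 0x40600000) & mask) == 0, leaving
   addr = 0x40000000, mask = 0xff800000: a single 8MB region covering
   both pages, flushed as a unit. */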
2200 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
2201 d4c430a8 Paul Brook
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2202 d4c430a8 Paul Brook
   supplied size is only used by tlb_flush_page.  */
2203 d4c430a8 Paul Brook
void tlb_set_page(CPUState *env, target_ulong vaddr,
2204 d4c430a8 Paul Brook
                  target_phys_addr_t paddr, int prot,
2205 d4c430a8 Paul Brook
                  int mmu_idx, target_ulong size)
2206 9fa3e853 bellard
{
2207 92e873b9 bellard
    PhysPageDesc *p;
2208 4f2ac237 bellard
    unsigned long pd;
2209 9fa3e853 bellard
    unsigned int index;
2210 4f2ac237 bellard
    target_ulong address;
2211 0f459d16 pbrook
    target_ulong code_address;
2212 355b1943 Paul Brook
    unsigned long addend;
2213 84b7b8e7 bellard
    CPUTLBEntry *te;
2214 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2215 c227f099 Anthony Liguori
    target_phys_addr_t iotlb;
2216 9fa3e853 bellard
2217 d4c430a8 Paul Brook
    assert(size >= TARGET_PAGE_SIZE);
2218 d4c430a8 Paul Brook
    if (size != TARGET_PAGE_SIZE) {
2219 d4c430a8 Paul Brook
        tlb_add_large_page(env, vaddr, size);
2220 d4c430a8 Paul Brook
    }
2221 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2222 9fa3e853 bellard
    if (!p) {
2223 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
2224 9fa3e853 bellard
    } else {
2225 9fa3e853 bellard
        pd = p->phys_offset;
2226 9fa3e853 bellard
    }
2227 9fa3e853 bellard
#if defined(DEBUG_TLB)
2228 7fd3f494 Stefan Weil
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2229 7fd3f494 Stefan Weil
           " prot=%x idx=%d pd=0x%08lx\n",
2230 7fd3f494 Stefan Weil
           vaddr, paddr, prot, mmu_idx, pd);
2231 9fa3e853 bellard
#endif
2232 9fa3e853 bellard
2233 0f459d16 pbrook
    address = vaddr;
2234 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2235 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2236 0f459d16 pbrook
        address |= TLB_MMIO;
2237 0f459d16 pbrook
    }
2238 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2239 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2240 0f459d16 pbrook
        /* Normal RAM.  */
2241 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2242 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2243 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
2244 0f459d16 pbrook
        else
2245 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
2246 0f459d16 pbrook
    } else {
2247 ccbb4d44 Stuart Brady
        /* IO handlers are currently passed a physical address.
2248 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2249 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2250 0f459d16 pbrook
           and avoid full address decoding in every device.
2251 0f459d16 pbrook
           We can't use the high bits of pd for this because
2252 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2253 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2254 8da3ff18 pbrook
        if (p) {
2255 8da3ff18 pbrook
            iotlb += p->region_offset;
2256 8da3ff18 pbrook
        } else {
2257 8da3ff18 pbrook
            iotlb += paddr;
2258 8da3ff18 pbrook
        }
2259 0f459d16 pbrook
    }
2260 0f459d16 pbrook
2261 0f459d16 pbrook
    code_address = address;
2262 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2263 0f459d16 pbrook
       watchpoint trap routines.  */
2264 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2265 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2266 bf298f83 Jun Koi
            /* Avoid trapping reads of pages with a write breakpoint. */
2267 bf298f83 Jun Koi
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2268 bf298f83 Jun Koi
                iotlb = io_mem_watch + paddr;
2269 bf298f83 Jun Koi
                address |= TLB_MMIO;
2270 bf298f83 Jun Koi
                break;
2271 bf298f83 Jun Koi
            }
2272 6658ffb8 pbrook
        }
2273 0f459d16 pbrook
    }
2274 d79acba4 balrog
2275 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2276 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2277 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
2278 0f459d16 pbrook
    te->addend = addend - vaddr;
2279 0f459d16 pbrook
    if (prot & PAGE_READ) {
2280 0f459d16 pbrook
        te->addr_read = address;
2281 0f459d16 pbrook
    } else {
2282 0f459d16 pbrook
        te->addr_read = -1;
2283 0f459d16 pbrook
    }
2284 5c751e99 edgar_igl
2285 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2286 0f459d16 pbrook
        te->addr_code = code_address;
2287 0f459d16 pbrook
    } else {
2288 0f459d16 pbrook
        te->addr_code = -1;
2289 0f459d16 pbrook
    }
2290 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2291 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2292 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2293 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2294 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2295 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2296 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2297 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2298 9fa3e853 bellard
        } else {
2299 0f459d16 pbrook
            te->addr_write = address;
2300 9fa3e853 bellard
        }
2301 0f459d16 pbrook
    } else {
2302 0f459d16 pbrook
        te->addr_write = -1;
2303 9fa3e853 bellard
    }
2304 9fa3e853 bellard
}
2305 9fa3e853 bellard
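/* A hedged sketch of how the generated softmmu load path consumes the
   entry filled in above (the real code lives in the softmmu templates;
   names mirror this file, and the low flag bits are glossed over).  On a
   hit, the host pointer is simply the guest address plus the cached
   addend:

       CPUTLBEntry *e = &env->tlb_table[mmu_idx]
                            [(addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1)];
       if ((addr & TARGET_PAGE_MASK) == (e->addr_read & TARGET_PAGE_MASK))
           val = *(uint32_t *)(unsigned long)(addr + e->addend);
       else
           ... refill via tlb_set_page() or take the I/O path ...
*/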
2306 0124311e bellard
#else
2307 0124311e bellard
2308 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2309 0124311e bellard
{
2310 0124311e bellard
}
2311 0124311e bellard
2312 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2313 0124311e bellard
{
2314 0124311e bellard
}
2315 0124311e bellard
2316 edf8e2af Mika Westerberg
/*
2317 edf8e2af Mika Westerberg
 * Walks guest process memory "regions" one by one
2318 edf8e2af Mika Westerberg
 * and calls callback function 'fn' for each region.
2319 edf8e2af Mika Westerberg
 */
2320 5cd2c5b6 Richard Henderson
2321 5cd2c5b6 Richard Henderson
struct walk_memory_regions_data
2322 5cd2c5b6 Richard Henderson
{
2323 5cd2c5b6 Richard Henderson
    walk_memory_regions_fn fn;
2324 5cd2c5b6 Richard Henderson
    void *priv;
2325 5cd2c5b6 Richard Henderson
    unsigned long start;
2326 5cd2c5b6 Richard Henderson
    int prot;
2327 5cd2c5b6 Richard Henderson
};
2328 5cd2c5b6 Richard Henderson
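/* The walk coalesces runs of pages: 'start' and 'prot' track the run of
   pages seen so far with identical protection, and
   walk_memory_regions_end() below flushes that run to 'fn' whenever the
   protection changes (start == -1ul means no run is open). */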
2329 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2330 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2331 5cd2c5b6 Richard Henderson
{
2332 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2333 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2334 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2335 5cd2c5b6 Richard Henderson
            return rc;
2336 5cd2c5b6 Richard Henderson
        }
2337 5cd2c5b6 Richard Henderson
    }
2338 5cd2c5b6 Richard Henderson
2339 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2340 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2341 5cd2c5b6 Richard Henderson
2342 5cd2c5b6 Richard Henderson
    return 0;
2343 5cd2c5b6 Richard Henderson
}
2344 5cd2c5b6 Richard Henderson
2345 5cd2c5b6 Richard Henderson
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2346 b480d9b7 Paul Brook
                                 abi_ulong base, int level, void **lp)
2347 5cd2c5b6 Richard Henderson
{
2348 b480d9b7 Paul Brook
    abi_ulong pa;
2349 5cd2c5b6 Richard Henderson
    int i, rc;
2350 5cd2c5b6 Richard Henderson
2351 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
2352 5cd2c5b6 Richard Henderson
        return walk_memory_regions_end(data, base, 0);
2353 5cd2c5b6 Richard Henderson
    }
2354 5cd2c5b6 Richard Henderson
2355 5cd2c5b6 Richard Henderson
    if (level == 0) {
2356 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
2357 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2358 5cd2c5b6 Richard Henderson
            int prot = pd[i].flags;
2359 5cd2c5b6 Richard Henderson
2360 5cd2c5b6 Richard Henderson
            pa = base | (i << TARGET_PAGE_BITS);
2361 5cd2c5b6 Richard Henderson
            if (prot != data->prot) {
2362 5cd2c5b6 Richard Henderson
                rc = walk_memory_regions_end(data, pa, prot);
2363 5cd2c5b6 Richard Henderson
                if (rc != 0) {
2364 5cd2c5b6 Richard Henderson
                    return rc;
2365 9fa3e853 bellard
                }
2366 9fa3e853 bellard
            }
2367 5cd2c5b6 Richard Henderson
        }
2368 5cd2c5b6 Richard Henderson
    } else {
2369 5cd2c5b6 Richard Henderson
        void **pp = *lp;
2370 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
2371 b480d9b7 Paul Brook
            pa = base | ((abi_ulong)i <<
2372 b480d9b7 Paul Brook
                (TARGET_PAGE_BITS + L2_BITS * level));
2373 5cd2c5b6 Richard Henderson
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2374 5cd2c5b6 Richard Henderson
            if (rc != 0) {
2375 5cd2c5b6 Richard Henderson
                return rc;
2376 5cd2c5b6 Richard Henderson
            }
2377 5cd2c5b6 Richard Henderson
        }
2378 5cd2c5b6 Richard Henderson
    }
2379 5cd2c5b6 Richard Henderson
2380 5cd2c5b6 Richard Henderson
    return 0;
2381 5cd2c5b6 Richard Henderson
}
2382 5cd2c5b6 Richard Henderson
2383 5cd2c5b6 Richard Henderson
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2384 5cd2c5b6 Richard Henderson
{
2385 5cd2c5b6 Richard Henderson
    struct walk_memory_regions_data data;
2386 5cd2c5b6 Richard Henderson
    unsigned long i;
2387 5cd2c5b6 Richard Henderson
2388 5cd2c5b6 Richard Henderson
    data.fn = fn;
2389 5cd2c5b6 Richard Henderson
    data.priv = priv;
2390 5cd2c5b6 Richard Henderson
    data.start = -1ul;
2391 5cd2c5b6 Richard Henderson
    data.prot = 0;
2392 5cd2c5b6 Richard Henderson
2393 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
2394 b480d9b7 Paul Brook
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2395 5cd2c5b6 Richard Henderson
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2396 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2397 5cd2c5b6 Richard Henderson
            return rc;
2398 9fa3e853 bellard
        }
2399 33417e70 bellard
    }
2400 5cd2c5b6 Richard Henderson
2401 5cd2c5b6 Richard Henderson
    return walk_memory_regions_end(&data, 0, 0);
2402 edf8e2af Mika Westerberg
}
2403 edf8e2af Mika Westerberg
2404 b480d9b7 Paul Brook
static int dump_region(void *priv, abi_ulong start,
2405 b480d9b7 Paul Brook
    abi_ulong end, unsigned long prot)
2406 edf8e2af Mika Westerberg
{
2407 edf8e2af Mika Westerberg
    FILE *f = (FILE *)priv;
2408 edf8e2af Mika Westerberg
2409 b480d9b7 Paul Brook
    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2410 b480d9b7 Paul Brook
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
2411 edf8e2af Mika Westerberg
        start, end, end - start,
2412 edf8e2af Mika Westerberg
        ((prot & PAGE_READ) ? 'r' : '-'),
2413 edf8e2af Mika Westerberg
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2414 edf8e2af Mika Westerberg
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2415 edf8e2af Mika Westerberg
2416 edf8e2af Mika Westerberg
    return (0);
2417 edf8e2af Mika Westerberg
}
2418 edf8e2af Mika Westerberg
2419 edf8e2af Mika Westerberg
/* dump memory mappings */
2420 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2421 edf8e2af Mika Westerberg
{
2422 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2423 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2424 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2425 33417e70 bellard
}
2426 33417e70 bellard
2427 53a5960a pbrook
int page_get_flags(target_ulong address)
2428 33417e70 bellard
{
2429 9fa3e853 bellard
    PageDesc *p;
2430 9fa3e853 bellard
2431 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2432 33417e70 bellard
    if (!p)
2433 9fa3e853 bellard
        return 0;
2434 9fa3e853 bellard
    return p->flags;
2435 9fa3e853 bellard
}
2436 9fa3e853 bellard
2437 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
2438 376a7909 Richard Henderson
   The flag PAGE_WRITE_ORG is set automatically depending
2439 376a7909 Richard Henderson
   on PAGE_WRITE.  The mmap_lock should already be held.  */
2440 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2441 9fa3e853 bellard
{
2442 376a7909 Richard Henderson
    target_ulong addr, len;
2443 376a7909 Richard Henderson
2444 376a7909 Richard Henderson
    /* This function should never be called with addresses outside the
2445 376a7909 Richard Henderson
       guest address space.  If this assert fires, it probably indicates
2446 376a7909 Richard Henderson
       a missing call to h2g_valid.  */
2447 b480d9b7 Paul Brook
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2448 b480d9b7 Paul Brook
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2449 376a7909 Richard Henderson
#endif
2450 376a7909 Richard Henderson
    assert(start < end);
2451 9fa3e853 bellard
2452 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2453 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2454 376a7909 Richard Henderson
2455 376a7909 Richard Henderson
    if (flags & PAGE_WRITE) {
2456 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2457 376a7909 Richard Henderson
    }
2458 376a7909 Richard Henderson
2459 376a7909 Richard Henderson
    for (addr = start, len = end - start;
2460 376a7909 Richard Henderson
         len != 0;
2461 376a7909 Richard Henderson
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2462 376a7909 Richard Henderson
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2463 376a7909 Richard Henderson
2464 376a7909 Richard Henderson
        /* If the write protection bit is set, then we invalidate
2465 376a7909 Richard Henderson
           the code inside.  */
2466 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2467 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2468 9fa3e853 bellard
            p->first_tb) {
2469 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2470 9fa3e853 bellard
        }
2471 9fa3e853 bellard
        p->flags = flags;
2472 9fa3e853 bellard
    }
2473 33417e70 bellard
}
2474 33417e70 bellard
2475 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2476 3d97b40b ths
{
2477 3d97b40b ths
    PageDesc *p;
2478 3d97b40b ths
    target_ulong end;
2479 3d97b40b ths
    target_ulong addr;
2480 3d97b40b ths
2481 376a7909 Richard Henderson
    /* This function should never be called with addresses outside the
2482 376a7909 Richard Henderson
       guest address space.  If this assert fires, it probably indicates
2483 376a7909 Richard Henderson
       a missing call to h2g_valid.  */
2484 338e9e6c Blue Swirl
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2485 338e9e6c Blue Swirl
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2486 376a7909 Richard Henderson
#endif
2487 376a7909 Richard Henderson
2488 3e0650a9 Richard Henderson
    if (len == 0) {
2489 3e0650a9 Richard Henderson
        return 0;
2490 3e0650a9 Richard Henderson
    }
2491 376a7909 Richard Henderson
    if (start + len - 1 < start) {
2492 376a7909 Richard Henderson
        /* We've wrapped around.  */
2493 55f280c9 balrog
        return -1;
2494 376a7909 Richard Henderson
    }
2495 55f280c9 balrog
2496 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2497 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2498 3d97b40b ths
2499 376a7909 Richard Henderson
    for (addr = start, len = end - start;
2500 376a7909 Richard Henderson
         len != 0;
2501 376a7909 Richard Henderson
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2502 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2503 3d97b40b ths
        if (!p)
2504 3d97b40b ths
            return -1;
2505 3d97b40b ths
        if (!(p->flags & PAGE_VALID))
2506 3d97b40b ths
            return -1;
2507 3d97b40b ths
2508 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2509 3d97b40b ths
            return -1;
2510 dae3270c bellard
        if (flags & PAGE_WRITE) {
2511 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2512 dae3270c bellard
                return -1;
2513 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2514 dae3270c bellard
               contains translated code */
2515 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2516 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2517 dae3270c bellard
                    return -1;
2518 dae3270c bellard
            }
2519 dae3270c bellard
            return 0;
2520 dae3270c bellard
        }
2521 3d97b40b ths
    }
2522 3d97b40b ths
    return 0;
2523 3d97b40b ths
}
2524 3d97b40b ths
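/* Hedged usage sketch (linux-user style; TARGET_EFAULT as defined there):
   a syscall helper validating a guest buffer before touching it:

       if (page_check_range(guest_addr, len, PAGE_READ) < 0)
           return -TARGET_EFAULT;
*/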
2525 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2526 ccbb4d44 Stuart Brady
   page. Return TRUE if the fault was successfully handled. */
2527 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2528 9fa3e853 bellard
{
2529 45d679d6 Aurelien Jarno
    unsigned int prot;
2530 45d679d6 Aurelien Jarno
    PageDesc *p;
2531 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2532 9fa3e853 bellard
2533 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2534 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2535 c8a706fe pbrook
       practice it seems to be ok.  */
2536 c8a706fe pbrook
    mmap_lock();
2537 c8a706fe pbrook
2538 45d679d6 Aurelien Jarno
    p = page_find(address >> TARGET_PAGE_BITS);
2539 45d679d6 Aurelien Jarno
    if (!p) {
2540 c8a706fe pbrook
        mmap_unlock();
2541 9fa3e853 bellard
        return 0;
2542 c8a706fe pbrook
    }
2543 45d679d6 Aurelien Jarno
2544 9fa3e853 bellard
    /* if the page was really writable, then we change its
2545 9fa3e853 bellard
       protection back to writable */
2546 45d679d6 Aurelien Jarno
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2547 45d679d6 Aurelien Jarno
        host_start = address & qemu_host_page_mask;
2548 45d679d6 Aurelien Jarno
        host_end = host_start + qemu_host_page_size;
2549 45d679d6 Aurelien Jarno
2550 45d679d6 Aurelien Jarno
        prot = 0;
2551 45d679d6 Aurelien Jarno
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2552 45d679d6 Aurelien Jarno
            p = page_find(addr >> TARGET_PAGE_BITS);
2553 45d679d6 Aurelien Jarno
            p->flags |= PAGE_WRITE;
2554 45d679d6 Aurelien Jarno
            prot |= p->flags;
2555 45d679d6 Aurelien Jarno
2556 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2557 9fa3e853 bellard
               the corresponding translated code. */
2558 45d679d6 Aurelien Jarno
            tb_invalidate_phys_page(addr, pc, puc);
2559 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2560 45d679d6 Aurelien Jarno
            tb_invalidate_check(addr);
2561 9fa3e853 bellard
#endif
2562 9fa3e853 bellard
        }
2563 45d679d6 Aurelien Jarno
        mprotect((void *)g2h(host_start), qemu_host_page_size,
2564 45d679d6 Aurelien Jarno
                 prot & PAGE_BITS);
2565 45d679d6 Aurelien Jarno
2566 45d679d6 Aurelien Jarno
        mmap_unlock();
2567 45d679d6 Aurelien Jarno
        return 1;
2568 9fa3e853 bellard
    }
2569 c8a706fe pbrook
    mmap_unlock();
2570 9fa3e853 bellard
    return 0;
2571 9fa3e853 bellard
}
2572 9fa3e853 bellard
2573 6a00d601 bellard
static inline void tlb_set_dirty(CPUState *env,
2574 6a00d601 bellard
                                 unsigned long addr, target_ulong vaddr)
2575 1ccde1cb bellard
{
2576 1ccde1cb bellard
}
2577 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2578 9fa3e853 bellard
2579 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2580 8da3ff18 pbrook
2581 c04b2b78 Paul Brook
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2582 c04b2b78 Paul Brook
typedef struct subpage_t {
2583 c04b2b78 Paul Brook
    target_phys_addr_t base;
2584 f6405247 Richard Henderson
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2585 f6405247 Richard Henderson
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
2586 c04b2b78 Paul Brook
} subpage_t;
2587 c04b2b78 Paul Brook
2588 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2589 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset);
2590 f6405247 Richard Henderson
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2591 f6405247 Richard Henderson
                                ram_addr_t orig_memory,
2592 f6405247 Richard Henderson
                                ram_addr_t region_offset);
2593 db7b5426 blueswir1
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2594 db7b5426 blueswir1
                      need_subpage)                                     \
2595 db7b5426 blueswir1
    do {                                                                \
2596 db7b5426 blueswir1
        if (addr > start_addr)                                          \
2597 db7b5426 blueswir1
            start_addr2 = 0;                                            \
2598 db7b5426 blueswir1
        else {                                                          \
2599 db7b5426 blueswir1
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2600 db7b5426 blueswir1
            if (start_addr2 > 0)                                        \
2601 db7b5426 blueswir1
                need_subpage = 1;                                       \
2602 db7b5426 blueswir1
        }                                                               \
2603 db7b5426 blueswir1
                                                                        \
2604 49e9fba2 blueswir1
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2605 db7b5426 blueswir1
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2606 db7b5426 blueswir1
        else {                                                          \
2607 db7b5426 blueswir1
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2608 db7b5426 blueswir1
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2609 db7b5426 blueswir1
                need_subpage = 1;                                       \
2610 db7b5426 blueswir1
        }                                                               \
2611 db7b5426 blueswir1
    } while (0)
2612 db7b5426 blueswir1
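/* Worked example (4K target pages): registering start_addr = 0x1080,
   orig_size = 0x100 and visiting the page at addr = 0x1000 yields
   start_addr2 = 0x80 and end_addr2 = 0x17f, both strictly inside the
   page, so need_subpage is set and only bytes [0x80, 0x17f] of that page
   are routed to the new handler. */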
2613 8f2498f9 Michael S. Tsirkin
/* register physical memory.
2614 8f2498f9 Michael S. Tsirkin
   For RAM, 'size' must be a multiple of the target page size.
2615 8f2498f9 Michael S. Tsirkin
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2616 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2617 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2618 ccbb4d44 Stuart Brady
   start_addr and region_offset are rounded down to a page boundary
2619 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2620 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
2621 0fd542fb Michael S. Tsirkin
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2622 c227f099 Anthony Liguori
                                         ram_addr_t size,
2623 c227f099 Anthony Liguori
                                         ram_addr_t phys_offset,
2624 0fd542fb Michael S. Tsirkin
                                         ram_addr_t region_offset,
2625 0fd542fb Michael S. Tsirkin
                                         bool log_dirty)
2626 33417e70 bellard
{
2627 c227f099 Anthony Liguori
    target_phys_addr_t addr, end_addr;
2628 92e873b9 bellard
    PhysPageDesc *p;
2629 9d42037b bellard
    CPUState *env;
2630 c227f099 Anthony Liguori
    ram_addr_t orig_size = size;
2631 f6405247 Richard Henderson
    subpage_t *subpage;
2632 33417e70 bellard
2633 3b8e6a2d Edgar E. Iglesias
    assert(size);
2634 0fd542fb Michael S. Tsirkin
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
2635 f6f3fbca Michael S. Tsirkin
2636 67c4d23c pbrook
    if (phys_offset == IO_MEM_UNASSIGNED) {
2637 67c4d23c pbrook
        region_offset = start_addr;
2638 67c4d23c pbrook
    }
2639 8da3ff18 pbrook
    region_offset &= TARGET_PAGE_MASK;
2640 5fd386f6 bellard
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2641 c227f099 Anthony Liguori
    end_addr = start_addr + (target_phys_addr_t)size;
2642 3b8e6a2d Edgar E. Iglesias
2643 3b8e6a2d Edgar E. Iglesias
    addr = start_addr;
2644 3b8e6a2d Edgar E. Iglesias
    do {
2645 db7b5426 blueswir1
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2646 db7b5426 blueswir1
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2647 c227f099 Anthony Liguori
            ram_addr_t orig_memory = p->phys_offset;
2648 c227f099 Anthony Liguori
            target_phys_addr_t start_addr2, end_addr2;
2649 db7b5426 blueswir1
            int need_subpage = 0;
2650 db7b5426 blueswir1
2651 db7b5426 blueswir1
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2652 db7b5426 blueswir1
                          need_subpage);
2653 f6405247 Richard Henderson
            if (need_subpage) {
2654 db7b5426 blueswir1
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2655 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2656 8da3ff18 pbrook
                                           &p->phys_offset, orig_memory,
2657 8da3ff18 pbrook
                                           p->region_offset);
2658 db7b5426 blueswir1
                } else {
2659 db7b5426 blueswir1
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2660 db7b5426 blueswir1
                                            >> IO_MEM_SHIFT];
2661 db7b5426 blueswir1
                }
2662 8da3ff18 pbrook
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2663 8da3ff18 pbrook
                                 region_offset);
2664 8da3ff18 pbrook
                p->region_offset = 0;
2665 db7b5426 blueswir1
            } else {
2666 db7b5426 blueswir1
                p->phys_offset = phys_offset;
2667 db7b5426 blueswir1
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2668 db7b5426 blueswir1
                    (phys_offset & IO_MEM_ROMD))
2669 db7b5426 blueswir1
                    phys_offset += TARGET_PAGE_SIZE;
2670 db7b5426 blueswir1
            }
2671 db7b5426 blueswir1
        } else {
2672 db7b5426 blueswir1
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2673 db7b5426 blueswir1
            p->phys_offset = phys_offset;
2674 8da3ff18 pbrook
            p->region_offset = region_offset;
2675 db7b5426 blueswir1
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2676 8da3ff18 pbrook
                (phys_offset & IO_MEM_ROMD)) {
2677 db7b5426 blueswir1
                phys_offset += TARGET_PAGE_SIZE;
2678 0e8f0967 pbrook
            } else {
2679 c227f099 Anthony Liguori
                target_phys_addr_t start_addr2, end_addr2;
2680 db7b5426 blueswir1
                int need_subpage = 0;
2681 db7b5426 blueswir1
2682 db7b5426 blueswir1
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2683 db7b5426 blueswir1
                              end_addr2, need_subpage);
2684 db7b5426 blueswir1
2685 f6405247 Richard Henderson
                if (need_subpage) {
2686 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2687 8da3ff18 pbrook
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2688 67c4d23c pbrook
                                           addr & TARGET_PAGE_MASK);
2689 db7b5426 blueswir1
                    subpage_register(subpage, start_addr2, end_addr2,
2690 8da3ff18 pbrook
                                     phys_offset, region_offset);
2691 8da3ff18 pbrook
                    p->region_offset = 0;
2692 db7b5426 blueswir1
                }
2693 db7b5426 blueswir1
            }
2694 db7b5426 blueswir1
        }
2695 8da3ff18 pbrook
        region_offset += TARGET_PAGE_SIZE;
2696 3b8e6a2d Edgar E. Iglesias
        addr += TARGET_PAGE_SIZE;
2697 3b8e6a2d Edgar E. Iglesias
    } while (addr != end_addr);
2698 3b46e624 ths
2699 9d42037b bellard
    /* since each CPU stores ram addresses in its TLB cache, we must
2700 9d42037b bellard
       reset the modified entries */
2701 9d42037b bellard
    /* XXX: slow ! */
2702 9d42037b bellard
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
2703 9d42037b bellard
        tlb_flush(env, 1);
2704 9d42037b bellard
    }
2705 33417e70 bellard
}
2706 33417e70 bellard
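/* Hedged usage sketch: a board mapping a freshly allocated RAM block at
   guest physical 0x100000 ("dev" stands in for the owning DeviceState;
   most callers go through the cpu_register_physical_memory() wrapper):

       ram_addr_t off = qemu_ram_alloc(dev, "example.ram", 0x10000);
       cpu_register_physical_memory_log(0x100000, 0x10000,
                                        off | IO_MEM_RAM, 0, false);
*/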
2707 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2708 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2709 ba863458 bellard
{
2710 ba863458 bellard
    PhysPageDesc *p;
2711 ba863458 bellard
2712 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2713 ba863458 bellard
    if (!p)
2714 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2715 ba863458 bellard
    return p->phys_offset;
2716 ba863458 bellard
}
2717 ba863458 bellard
2718 c227f099 Anthony Liguori
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2719 f65ed4c1 aliguori
{
2720 f65ed4c1 aliguori
    if (kvm_enabled())
2721 f65ed4c1 aliguori
        kvm_coalesce_mmio_region(addr, size);
2722 f65ed4c1 aliguori
}
2723 f65ed4c1 aliguori
2724 c227f099 Anthony Liguori
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2725 f65ed4c1 aliguori
{
2726 f65ed4c1 aliguori
    if (kvm_enabled())
2727 f65ed4c1 aliguori
        kvm_uncoalesce_mmio_region(addr, size);
2728 f65ed4c1 aliguori
}
2729 f65ed4c1 aliguori
2730 62a2744c Sheng Yang
void qemu_flush_coalesced_mmio_buffer(void)
2731 62a2744c Sheng Yang
{
2732 62a2744c Sheng Yang
    if (kvm_enabled())
2733 62a2744c Sheng Yang
        kvm_flush_coalesced_mmio_buffer();
2734 62a2744c Sheng Yang
}
2735 62a2744c Sheng Yang
2736 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2737 c902760f Marcelo Tosatti
2738 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2739 c902760f Marcelo Tosatti
2740 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
2741 c902760f Marcelo Tosatti
2742 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
2743 c902760f Marcelo Tosatti
{
2744 c902760f Marcelo Tosatti
    struct statfs fs;
2745 c902760f Marcelo Tosatti
    int ret;
2746 c902760f Marcelo Tosatti
2747 c902760f Marcelo Tosatti
    do {
2748 9742bf26 Yoshiaki Tamura
        ret = statfs(path, &fs);
2749 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);
2750 c902760f Marcelo Tosatti
2751 c902760f Marcelo Tosatti
    if (ret != 0) {
2752 9742bf26 Yoshiaki Tamura
        perror(path);
2753 9742bf26 Yoshiaki Tamura
        return 0;
2754 c902760f Marcelo Tosatti
    }
2755 c902760f Marcelo Tosatti
2756 c902760f Marcelo Tosatti
    if (fs.f_type != HUGETLBFS_MAGIC)
2757 9742bf26 Yoshiaki Tamura
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2758 c902760f Marcelo Tosatti
2759 c902760f Marcelo Tosatti
    return fs.f_bsize;
2760 c902760f Marcelo Tosatti
}
2761 c902760f Marcelo Tosatti
2762 04b16653 Alex Williamson
static void *file_ram_alloc(RAMBlock *block,
2763 04b16653 Alex Williamson
                            ram_addr_t memory,
2764 04b16653 Alex Williamson
                            const char *path)
2765 c902760f Marcelo Tosatti
{
2766 c902760f Marcelo Tosatti
    char *filename;
2767 c902760f Marcelo Tosatti
    void *area;
2768 c902760f Marcelo Tosatti
    int fd;
2769 c902760f Marcelo Tosatti
#ifdef MAP_POPULATE
2770 c902760f Marcelo Tosatti
    int flags;
2771 c902760f Marcelo Tosatti
#endif
2772 c902760f Marcelo Tosatti
    unsigned long hpagesize;
2773 c902760f Marcelo Tosatti
2774 c902760f Marcelo Tosatti
    hpagesize = gethugepagesize(path);
2775 c902760f Marcelo Tosatti
    if (!hpagesize) {
2776 9742bf26 Yoshiaki Tamura
        return NULL;
2777 c902760f Marcelo Tosatti
    }
2778 c902760f Marcelo Tosatti
2779 c902760f Marcelo Tosatti
    if (memory < hpagesize) {
2780 c902760f Marcelo Tosatti
        return NULL;
2781 c902760f Marcelo Tosatti
    }
2782 c902760f Marcelo Tosatti
2783 c902760f Marcelo Tosatti
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
2784 c902760f Marcelo Tosatti
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2785 c902760f Marcelo Tosatti
        return NULL;
2786 c902760f Marcelo Tosatti
    }
2787 c902760f Marcelo Tosatti
2788 c902760f Marcelo Tosatti
    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2789 9742bf26 Yoshiaki Tamura
        return NULL;
2790 c902760f Marcelo Tosatti
    }
2791 c902760f Marcelo Tosatti
2792 c902760f Marcelo Tosatti
    fd = mkstemp(filename);
2793 c902760f Marcelo Tosatti
    if (fd < 0) {
2794 9742bf26 Yoshiaki Tamura
        perror("unable to create backing store for hugepages");
2795 9742bf26 Yoshiaki Tamura
        free(filename);
2796 9742bf26 Yoshiaki Tamura
        return NULL;
2797 c902760f Marcelo Tosatti
    }
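    /* Hedged note: the mapping stays reachable through the open fd, so
       unlinking right away just means the hugetlbfs backing file is
       reclaimed automatically once the fd is closed or QEMU exits. */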
2798 c902760f Marcelo Tosatti
    unlink(filename);
2799 c902760f Marcelo Tosatti
    free(filename);
2800 c902760f Marcelo Tosatti
2801 c902760f Marcelo Tosatti
    memory = (memory+hpagesize-1) & ~(hpagesize-1);
2802 c902760f Marcelo Tosatti
2803 c902760f Marcelo Tosatti
    /*
2804 c902760f Marcelo Tosatti
     * ftruncate is not supported by hugetlbfs in older
2805 c902760f Marcelo Tosatti
     * hosts, so don't bother bailing out on errors.
2806 c902760f Marcelo Tosatti
     * If anything goes wrong with it under other filesystems,
2807 c902760f Marcelo Tosatti
     * mmap will fail.
2808 c902760f Marcelo Tosatti
     */
2809 c902760f Marcelo Tosatti
    if (ftruncate(fd, memory))
2810 9742bf26 Yoshiaki Tamura
        perror("ftruncate");
2811 c902760f Marcelo Tosatti
2812 c902760f Marcelo Tosatti
#ifdef MAP_POPULATE
2813 c902760f Marcelo Tosatti
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2814 c902760f Marcelo Tosatti
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2815 c902760f Marcelo Tosatti
     * to sidestep this quirk.
2816 c902760f Marcelo Tosatti
     */
2817 c902760f Marcelo Tosatti
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2818 c902760f Marcelo Tosatti
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2819 c902760f Marcelo Tosatti
#else
2820 c902760f Marcelo Tosatti
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2821 c902760f Marcelo Tosatti
#endif
2822 c902760f Marcelo Tosatti
    if (area == MAP_FAILED) {
2823 9742bf26 Yoshiaki Tamura
        perror("file_ram_alloc: can't mmap RAM pages");
2824 9742bf26 Yoshiaki Tamura
        close(fd);
2825 9742bf26 Yoshiaki Tamura
        return (NULL);
2826 c902760f Marcelo Tosatti
    }
2827 04b16653 Alex Williamson
    block->fd = fd;
2828 c902760f Marcelo Tosatti
    return area;
2829 c902760f Marcelo Tosatti
}
2830 c902760f Marcelo Tosatti
#endif
2831 c902760f Marcelo Tosatti
2832 d17b5288 Alex Williamson
static ram_addr_t find_ram_offset(ram_addr_t size)
2833 d17b5288 Alex Williamson
{
2834 04b16653 Alex Williamson
    RAMBlock *block, *next_block;
2835 09d7ae90 Blue Swirl
    ram_addr_t offset = 0, mingap = ULONG_MAX;
2836 04b16653 Alex Williamson
2837 04b16653 Alex Williamson
    if (QLIST_EMPTY(&ram_list.blocks))
2838 04b16653 Alex Williamson
        return 0;
2839 04b16653 Alex Williamson
2840 04b16653 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2841 04b16653 Alex Williamson
        ram_addr_t end, next = ULONG_MAX;
2842 04b16653 Alex Williamson
2843 04b16653 Alex Williamson
        end = block->offset + block->length;
2844 04b16653 Alex Williamson
2845 04b16653 Alex Williamson
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2846 04b16653 Alex Williamson
            if (next_block->offset >= end) {
2847 04b16653 Alex Williamson
                next = MIN(next, next_block->offset);
2848 04b16653 Alex Williamson
            }
2849 04b16653 Alex Williamson
        }
2850 04b16653 Alex Williamson
        if (next - end >= size && next - end < mingap) {
2851 04b16653 Alex Williamson
            offset = end;
2852 04b16653 Alex Williamson
            mingap = next - end;
2853 04b16653 Alex Williamson
        }
2854 04b16653 Alex Williamson
    }
2855 04b16653 Alex Williamson
    return offset;
2856 04b16653 Alex Williamson
}
2857 04b16653 Alex Williamson
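/* Worked example: with blocks at [0, 0x1000) and [0x3000, 0x4000), a
   request for 0x1000 bytes sees two candidate gaps: 0x2000 bytes after
   the first block and an unbounded one after the second.  The smallest
   sufficient gap wins, so the new block lands at offset 0x1000. */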
2858 04b16653 Alex Williamson
static ram_addr_t last_ram_offset(void)
2859 04b16653 Alex Williamson
{
2860 d17b5288 Alex Williamson
    RAMBlock *block;
2861 d17b5288 Alex Williamson
    ram_addr_t last = 0;
2862 d17b5288 Alex Williamson
2863 d17b5288 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next)
2864 d17b5288 Alex Williamson
        last = MAX(last, block->offset + block->length);
2865 d17b5288 Alex Williamson
2866 d17b5288 Alex Williamson
    return last;
2867 d17b5288 Alex Williamson
}
2868 d17b5288 Alex Williamson
2869 84b89d78 Cam Macdonell
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
2870 6977dfe6 Yoshiaki Tamura
                                   ram_addr_t size, void *host)
2871 84b89d78 Cam Macdonell
{
2872 84b89d78 Cam Macdonell
    RAMBlock *new_block, *block;
2873 84b89d78 Cam Macdonell
2874 84b89d78 Cam Macdonell
    size = TARGET_PAGE_ALIGN(size);
2875 84b89d78 Cam Macdonell
    new_block = qemu_mallocz(sizeof(*new_block));
2876 84b89d78 Cam Macdonell
2877 84b89d78 Cam Macdonell
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2878 84b89d78 Cam Macdonell
        char *id = dev->parent_bus->info->get_dev_path(dev);
2879 84b89d78 Cam Macdonell
        if (id) {
2880 84b89d78 Cam Macdonell
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2881 84b89d78 Cam Macdonell
            qemu_free(id);
2882 84b89d78 Cam Macdonell
        }
2883 84b89d78 Cam Macdonell
    }
2884 84b89d78 Cam Macdonell
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2885 84b89d78 Cam Macdonell
2886 84b89d78 Cam Macdonell
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2887 84b89d78 Cam Macdonell
        if (!strcmp(block->idstr, new_block->idstr)) {
2888 84b89d78 Cam Macdonell
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2889 84b89d78 Cam Macdonell
                    new_block->idstr);
2890 84b89d78 Cam Macdonell
            abort();
2891 84b89d78 Cam Macdonell
        }
2892 84b89d78 Cam Macdonell
    }
2893 84b89d78 Cam Macdonell
2894 432d268c Jun Nakajima
    new_block->offset = find_ram_offset(size);
2895 6977dfe6 Yoshiaki Tamura
    if (host) {
2896 6977dfe6 Yoshiaki Tamura
        new_block->host = host;
2897 cd19cfa2 Huang Ying
        new_block->flags |= RAM_PREALLOC_MASK;
2898 6977dfe6 Yoshiaki Tamura
    } else {
2899 6977dfe6 Yoshiaki Tamura
        if (mem_path) {
2900 c902760f Marcelo Tosatti
#if defined (__linux__) && !defined(TARGET_S390X)
2901 6977dfe6 Yoshiaki Tamura
            new_block->host = file_ram_alloc(new_block, size, mem_path);
2902 6977dfe6 Yoshiaki Tamura
            if (!new_block->host) {
2903 6977dfe6 Yoshiaki Tamura
                new_block->host = qemu_vmalloc(size);
2904 e78815a5 Andreas Fรคrber
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2905 6977dfe6 Yoshiaki Tamura
            }
2906 c902760f Marcelo Tosatti
#else
2907 6977dfe6 Yoshiaki Tamura
            fprintf(stderr, "-mem-path option unsupported\n");
2908 6977dfe6 Yoshiaki Tamura
            exit(1);
2909 c902760f Marcelo Tosatti
#endif
2910 6977dfe6 Yoshiaki Tamura
        } else {
2911 6b02494d Alexander Graf
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2912 ff83678a Christian Borntraeger
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
2913 ff83678a Christian Borntraeger
               a system-defined value, which is at least 256GB. Larger systems
2914 ff83678a Christian Borntraeger
               have larger values. We put the guest between the end of the data
2915 ff83678a Christian Borntraeger
               segment (system break) and this value. We use 32GB as a base to
2916 ff83678a Christian Borntraeger
               have enough room for the system break to grow. */
2917 ff83678a Christian Borntraeger
            new_block->host = mmap((void*)0x800000000, size,
2918 6977dfe6 Yoshiaki Tamura
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
2919 ff83678a Christian Borntraeger
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2920 fb8b2735 Alexander Graf
            if (new_block->host == MAP_FAILED) {
2921 fb8b2735 Alexander Graf
                fprintf(stderr, "Allocating RAM failed\n");
2922 fb8b2735 Alexander Graf
                abort();
2923 fb8b2735 Alexander Graf
            }
2924 6b02494d Alexander Graf
#else
2925 432d268c Jun Nakajima
            if (xen_mapcache_enabled()) {
2926 432d268c Jun Nakajima
                xen_ram_alloc(new_block->offset, size);
2927 432d268c Jun Nakajima
            } else {
2928 432d268c Jun Nakajima
                new_block->host = qemu_vmalloc(size);
2929 432d268c Jun Nakajima
            }
2930 6b02494d Alexander Graf
#endif
2931 e78815a5 Andreas Fรคrber
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2932 6977dfe6 Yoshiaki Tamura
        }
2933 c902760f Marcelo Tosatti
    }
2934 94a6b54f pbrook
    new_block->length = size;
2935 94a6b54f pbrook
2936 f471a17e Alex Williamson
    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2937 94a6b54f pbrook
2938 f471a17e Alex Williamson
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2939 04b16653 Alex Williamson
                                       last_ram_offset() >> TARGET_PAGE_BITS);
2940 d17b5288 Alex Williamson
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2941 94a6b54f pbrook
           0xff, size >> TARGET_PAGE_BITS);
2942 94a6b54f pbrook
2943 6f0437e8 Jan Kiszka
    if (kvm_enabled())
2944 6f0437e8 Jan Kiszka
        kvm_setup_guest_memory(new_block->host, size);
2945 6f0437e8 Jan Kiszka
2946 94a6b54f pbrook
    return new_block->offset;
2947 94a6b54f pbrook
}
2948 e9a1ab19 bellard
2949 6977dfe6 Yoshiaki Tamura
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2950 6977dfe6 Yoshiaki Tamura
{
2951 6977dfe6 Yoshiaki Tamura
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2952 6977dfe6 Yoshiaki Tamura
}
2953 6977dfe6 Yoshiaki Tamura
2954 1f2e98b6 Alex Williamson
void qemu_ram_free_from_ptr(ram_addr_t addr)
2955 1f2e98b6 Alex Williamson
{
2956 1f2e98b6 Alex Williamson
    RAMBlock *block;
2957 1f2e98b6 Alex Williamson
2958 1f2e98b6 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2959 1f2e98b6 Alex Williamson
        if (addr == block->offset) {
2960 1f2e98b6 Alex Williamson
            QLIST_REMOVE(block, next);
2961 1f2e98b6 Alex Williamson
            qemu_free(block);
2962 1f2e98b6 Alex Williamson
            return;
2963 1f2e98b6 Alex Williamson
        }
2964 1f2e98b6 Alex Williamson
    }
2965 1f2e98b6 Alex Williamson
}
2966 1f2e98b6 Alex Williamson
2967 c227f099 Anthony Liguori
void qemu_ram_free(ram_addr_t addr)
2968 e9a1ab19 bellard
{
2969 04b16653 Alex Williamson
    RAMBlock *block;
2970 04b16653 Alex Williamson
2971 04b16653 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2972 04b16653 Alex Williamson
        if (addr == block->offset) {
2973 04b16653 Alex Williamson
            QLIST_REMOVE(block, next);
2974 cd19cfa2 Huang Ying
            if (block->flags & RAM_PREALLOC_MASK) {
2975 cd19cfa2 Huang Ying
                ;
2976 cd19cfa2 Huang Ying
            } else if (mem_path) {
2977 04b16653 Alex Williamson
#if defined (__linux__) && !defined(TARGET_S390X)
2978 04b16653 Alex Williamson
                if (block->fd) {
2979 04b16653 Alex Williamson
                    munmap(block->host, block->length);
2980 04b16653 Alex Williamson
                    close(block->fd);
2981 04b16653 Alex Williamson
                } else {
2982 04b16653 Alex Williamson
                    qemu_vfree(block->host);
2983 04b16653 Alex Williamson
                }
2984 fd28aa13 Jan Kiszka
#else
2985 fd28aa13 Jan Kiszka
                abort();
2986 04b16653 Alex Williamson
#endif
2987 04b16653 Alex Williamson
            } else {
2988 04b16653 Alex Williamson
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2989 04b16653 Alex Williamson
                munmap(block->host, block->length);
2990 04b16653 Alex Williamson
#else
2991 432d268c Jun Nakajima
                if (xen_mapcache_enabled()) {
2992 432d268c Jun Nakajima
                    qemu_invalidate_entry(block->host);
2993 432d268c Jun Nakajima
                } else {
2994 432d268c Jun Nakajima
                    qemu_vfree(block->host);
2995 432d268c Jun Nakajima
                }
2996 04b16653 Alex Williamson
#endif
2997 04b16653 Alex Williamson
            }
2998 04b16653 Alex Williamson
            qemu_free(block);
2999 04b16653 Alex Williamson
            return;
3000 04b16653 Alex Williamson
        }
3001 04b16653 Alex Williamson
    }
3002 04b16653 Alex Williamson
3003 e9a1ab19 bellard
}
3004 e9a1ab19 bellard
3005 cd19cfa2 Huang Ying
#ifndef _WIN32
3006 cd19cfa2 Huang Ying
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3007 cd19cfa2 Huang Ying
{
3008 cd19cfa2 Huang Ying
    RAMBlock *block;
3009 cd19cfa2 Huang Ying
    ram_addr_t offset;
3010 cd19cfa2 Huang Ying
    int flags;
3011 cd19cfa2 Huang Ying
    void *area, *vaddr;
3012 cd19cfa2 Huang Ying
3013 cd19cfa2 Huang Ying
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3014 cd19cfa2 Huang Ying
        offset = addr - block->offset;
3015 cd19cfa2 Huang Ying
        if (offset < block->length) {
3016 cd19cfa2 Huang Ying
            vaddr = block->host + offset;
3017 cd19cfa2 Huang Ying
            if (block->flags & RAM_PREALLOC_MASK) {
3018 cd19cfa2 Huang Ying
                ;
3019 cd19cfa2 Huang Ying
            } else {
3020 cd19cfa2 Huang Ying
                flags = MAP_FIXED;
3021 cd19cfa2 Huang Ying
                munmap(vaddr, length);
3022 cd19cfa2 Huang Ying
                if (mem_path) {
3023 cd19cfa2 Huang Ying
#if defined(__linux__) && !defined(TARGET_S390X)
3024 cd19cfa2 Huang Ying
                    if (block->fd) {
3025 cd19cfa2 Huang Ying
#ifdef MAP_POPULATE
3026 cd19cfa2 Huang Ying
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3027 cd19cfa2 Huang Ying
                            MAP_PRIVATE;
3028 cd19cfa2 Huang Ying
#else
3029 cd19cfa2 Huang Ying
                        flags |= MAP_PRIVATE;
3030 cd19cfa2 Huang Ying
#endif
3031 cd19cfa2 Huang Ying
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3032 cd19cfa2 Huang Ying
                                    flags, block->fd, offset);
3033 cd19cfa2 Huang Ying
                    } else {
3034 cd19cfa2 Huang Ying
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3035 cd19cfa2 Huang Ying
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3036 cd19cfa2 Huang Ying
                                    flags, -1, 0);
3037 cd19cfa2 Huang Ying
                    }
3038 fd28aa13 Jan Kiszka
#else
3039 fd28aa13 Jan Kiszka
                    abort();
3040 cd19cfa2 Huang Ying
#endif
3041 cd19cfa2 Huang Ying
                } else {
3042 cd19cfa2 Huang Ying
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3043 cd19cfa2 Huang Ying
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
3044 cd19cfa2 Huang Ying
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3045 cd19cfa2 Huang Ying
                                flags, -1, 0);
3046 cd19cfa2 Huang Ying
#else
3047 cd19cfa2 Huang Ying
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3048 cd19cfa2 Huang Ying
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3049 cd19cfa2 Huang Ying
                                flags, -1, 0);
3050 cd19cfa2 Huang Ying
#endif
3051 cd19cfa2 Huang Ying
                }
3052 cd19cfa2 Huang Ying
                if (area != vaddr) {
3053 cd19cfa2 Huang Ying
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3054 cd19cfa2 Huang Ying
                            length, addr);
3055 cd19cfa2 Huang Ying
                    exit(1);
3056 cd19cfa2 Huang Ying
                }
3057 cd19cfa2 Huang Ying
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3058 cd19cfa2 Huang Ying
            }
3059 cd19cfa2 Huang Ying
            return;
3060 cd19cfa2 Huang Ying
        }
3061 cd19cfa2 Huang Ying
    }
3062 cd19cfa2 Huang Ying
}
3063 cd19cfa2 Huang Ying
#endif /* !_WIN32 */
3064 cd19cfa2 Huang Ying
3065 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
3066 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
3067 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
3068 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
3069 5579c7f3 pbrook

3070 5579c7f3 pbrook
   It should not be used for general purpose DMA.
3071 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3072 5579c7f3 pbrook
 */
3073 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
3074 dc828ca1 pbrook
{
3075 94a6b54f pbrook
    RAMBlock *block;
3076 94a6b54f pbrook
3077 f471a17e Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3078 f471a17e Alex Williamson
        if (addr - block->offset < block->length) {
3079 7d82af38 Vincent Palatin
            /* Move this entry to the start of the list.  */
3080 7d82af38 Vincent Palatin
            if (block != QLIST_FIRST(&ram_list.blocks)) {
3081 7d82af38 Vincent Palatin
                QLIST_REMOVE(block, next);
3082 7d82af38 Vincent Palatin
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3083 7d82af38 Vincent Palatin
            }
3084 432d268c Jun Nakajima
            if (xen_mapcache_enabled()) {
3085 432d268c Jun Nakajima
                /* We need to check if the requested address is in the RAM
3086 432d268c Jun Nakajima
                 * because we don't want to map the entire memory in QEMU.
3087 432d268c Jun Nakajima
                 */
3088 432d268c Jun Nakajima
                if (block->offset == 0) {
3089 432d268c Jun Nakajima
                    return qemu_map_cache(addr, 0, 1);
3090 432d268c Jun Nakajima
                } else if (block->host == NULL) {
3091 432d268c Jun Nakajima
                    block->host = xen_map_block(block->offset, block->length);
3092 432d268c Jun Nakajima
                }
3093 432d268c Jun Nakajima
            }
3094 f471a17e Alex Williamson
            return block->host + (addr - block->offset);
3095 f471a17e Alex Williamson
        }
3096 94a6b54f pbrook
    }
3097 f471a17e Alex Williamson
3098 f471a17e Alex Williamson
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3099 f471a17e Alex Williamson
    abort();
3100 f471a17e Alex Williamson
3101 f471a17e Alex Williamson
    return NULL;
3102 dc828ca1 pbrook
}
3103 dc828ca1 pbrook
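/* Design note: the move-to-front above makes the linear search O(1) for
   the common case of repeated hits on the same block, at the cost of
   mutating the list on every lookup; qemu_safe_ram_ptr() below is the
   variant for callers that cannot tolerate that reordering. */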
3104 b2e0a138 Michael S. Tsirkin
/* Return a host pointer to ram allocated with qemu_ram_alloc.
3105 b2e0a138 Michael S. Tsirkin
 * Same as qemu_get_ram_ptr, but avoids reordering the RAM blocks.
3106 b2e0a138 Michael S. Tsirkin
 */
3107 b2e0a138 Michael S. Tsirkin
void *qemu_safe_ram_ptr(ram_addr_t addr)
3108 b2e0a138 Michael S. Tsirkin
{
3109 b2e0a138 Michael S. Tsirkin
    RAMBlock *block;
3110 b2e0a138 Michael S. Tsirkin
3111 b2e0a138 Michael S. Tsirkin
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3112 b2e0a138 Michael S. Tsirkin
        if (addr - block->offset < block->length) {
3113 432d268c Jun Nakajima
            if (xen_mapcache_enabled()) {
3114 432d268c Jun Nakajima
                /* We need to check if the requested address is in the RAM
3115 432d268c Jun Nakajima
                 * because we don't want to map the entire memory in QEMU.
3116 432d268c Jun Nakajima
                 */
3117 432d268c Jun Nakajima
                if (block->offset == 0) {
3118 432d268c Jun Nakajima
                    return qemu_map_cache(addr, 0, 1);
3119 432d268c Jun Nakajima
                } else if (block->host == NULL) {
3120 432d268c Jun Nakajima
                    block->host = xen_map_block(block->offset, block->length);
3121 432d268c Jun Nakajima
                }
3122 432d268c Jun Nakajima
            }
3123 b2e0a138 Michael S. Tsirkin
            return block->host + (addr - block->offset);
3124 b2e0a138 Michael S. Tsirkin
        }
3125 b2e0a138 Michael S. Tsirkin
    }
3126 b2e0a138 Michael S. Tsirkin
3127 b2e0a138 Michael S. Tsirkin
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3128 b2e0a138 Michael S. Tsirkin
    abort();
3129 b2e0a138 Michael S. Tsirkin
3130 b2e0a138 Michael S. Tsirkin
    return NULL;
3131 b2e0a138 Michael S. Tsirkin
}
3132 b2e0a138 Michael S. Tsirkin
3133 050a0ddf Anthony PERARD
void qemu_put_ram_ptr(void *addr)
3134 050a0ddf Anthony PERARD
{
3135 050a0ddf Anthony PERARD
    trace_qemu_put_ram_ptr(addr);
3136 050a0ddf Anthony PERARD
3137 050a0ddf Anthony PERARD
    if (xen_mapcache_enabled()) {
3138 050a0ddf Anthony PERARD
        RAMBlock *block;
3139 050a0ddf Anthony PERARD
3140 050a0ddf Anthony PERARD
        QLIST_FOREACH(block, &ram_list.blocks, next) {
3141 050a0ddf Anthony PERARD
            if (addr == block->host) {
3142 050a0ddf Anthony PERARD
                break;
3143 050a0ddf Anthony PERARD
            }
3144 050a0ddf Anthony PERARD
        }
3145 050a0ddf Anthony PERARD
        if (block && block->host) {
3146 050a0ddf Anthony PERARD
            xen_unmap_block(block->host, block->length);
3147 050a0ddf Anthony PERARD
            block->host = NULL;
3148 050a0ddf Anthony PERARD
        } else {
3149 050a0ddf Anthony PERARD
            qemu_map_cache_unlock(addr);
3150 050a0ddf Anthony PERARD
        }
3151 050a0ddf Anthony PERARD
    }
3152 050a0ddf Anthony PERARD
}
3153 050a0ddf Anthony PERARD
3154 e890261f Marcelo Tosatti
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3155 5579c7f3 pbrook
{
3156 94a6b54f pbrook
    RAMBlock *block;
3157 94a6b54f pbrook
    uint8_t *host = ptr;
3158 94a6b54f pbrook
3159 f471a17e Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3160 432d268c Jun Nakajima
        /* This case append when the block is not mapped. */
3161 432d268c Jun Nakajima
        if (block->host == NULL) {
3162 432d268c Jun Nakajima
            continue;
3163 432d268c Jun Nakajima
        }
3164 f471a17e Alex Williamson
        if (host - block->host < block->length) {
3165 e890261f Marcelo Tosatti
            *ram_addr = block->offset + (host - block->host);
3166 e890261f Marcelo Tosatti
            return 0;
3167 f471a17e Alex Williamson
        }
3168 94a6b54f pbrook
    }
3169 432d268c Jun Nakajima
3170 432d268c Jun Nakajima
    if (xen_mapcache_enabled()) {
3171 432d268c Jun Nakajima
        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
3172 432d268c Jun Nakajima
        return 0;
3173 432d268c Jun Nakajima
    }
3174 432d268c Jun Nakajima
3175 e890261f Marcelo Tosatti
    return -1;
3176 e890261f Marcelo Tosatti
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
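
/* Illustrative sketch (not part of the original file): round-tripping a
 * guest-RAM offset through a host pointer and back, assuming 'offset'
 * refers to RAM previously set up with qemu_ram_alloc().
 */
#if 0
static void ram_addr_round_trip_example(ram_addr_t offset)
{
    void *host = qemu_get_ram_ptr(offset);   /* RAM offset -> host pointer */
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == offset);              /* host pointer -> same offset */
    }
    qemu_put_ram_ptr(host);                  /* balance the Xen map cache */
}
#endif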

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
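
/* Illustrative sketch (not part of the original file): the dirty-flag
 * dance above on plain integers.  A write through the notdirty handler
 * ORs in every dirty bit except CODE_DIRTY_FLAG; only once the translated
 * code for the page has been flushed does dirty_flags reach 0xff and the
 * notdirty TLB callback get removed.
 */
#if 0
static void notdirty_flags_example(void)
{
    int dirty_flags = 0xff & ~CODE_DIRTY_FLAG;  /* page still caches TBs */

    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);   /* what the handler ORs in */
    assert(dirty_flags != 0xff);                /* callback stays installed */

    dirty_flags |= CODE_DIRTY_FLAG;             /* set once code is flushed */
    assert(dirty_flags == 0xff);                /* callback can be removed */
}
#endif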

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
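
/* Illustrative sketch (not part of the original file): how a caller such
 * as the gdbstub arms the machinery above.  Inserting a watchpoint remaps
 * the page through io_mem_watch, so every access funnels into
 * check_watchpoint() first.
 */
#if 0
static void watchpoint_usage_example(CPUState *env, target_ulong guest_va)
{
    CPUWatchpoint *wp;

    /* Trap 4-byte writes to guest_va; EXCP_DEBUG is raised on a hit. */
    if (cpu_watchpoint_insert(env, guest_va, 4, BP_MEM_WRITE, &wp) < 0) {
        fprintf(stderr, "could not insert watchpoint\n");
        return;
    }
    /* ... run the guest; a hit sets env->watchpoint_hit = wp ... */
    cpu_watchpoint_remove_by_ref(env, wp);
}
#endif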

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
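
/* Illustrative sketch (not part of the original file): subpages exist so
 * that two io regions can share one guest page.  Registering a region
 * smaller than TARGET_PAGE_SIZE with cpu_register_physical_memory() ends
 * up in subpage_init()/subpage_register() behind the scenes.  'my_io_a'
 * and 'my_io_b' are hypothetical handles from cpu_register_io_memory(),
 * and the addresses are arbitrary examples.
 */
#if 0
static void subpage_usage_example(int my_io_a, int my_io_b)
{
    /* Two 2KB devices packed into the 4KB page at 0x10000000. */
    cpu_register_physical_memory(0x10000000, 0x800, my_io_a);
    cpu_register_physical_memory(0x10000800, 0x800, my_io_b);
}
#endif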

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */
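
/* Illustrative sketch (not part of the original file): the swap decision
 * from the table above, on concrete values.  bswap16()/bswap32() are the
 * helpers the swapendian_* functions below rely on.
 */
#if 0
static void endian_swap_example(void)
{
    /* A big-endian device returns 0x1234; a little-endian CPU must see
       the bytes reversed, and vice versa. */
    assert(bswap16(0x1234) == 0x3412);
    assert(bswap32(0x12345678) == 0x78563412);
}
#endif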

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
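
/* Illustrative sketch (not part of the original file): what a device model
 * typically passes in.  'my_dev_readl'/'my_dev_writel' and the mapping
 * address are hypothetical; the byte and word slots are left NULL and fall
 * back to the unassigned handlers above.
 */
#if 0
static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0x42;  /* device register contents */
}

static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* latch val into device state */
}

static CPUReadMemoryFunc * const my_dev_read[3] = { NULL, NULL, my_dev_readl };
static CPUWriteMemoryFunc * const my_dev_write[3] = { NULL, NULL, my_dev_writel };

static void my_dev_map(void *state)
{
    int io = cpu_register_io_memory(my_dev_read, my_dev_write, state,
                                    DEVICE_LITTLE_ENDIAN);
    cpu_register_physical_memory(0x20000000, TARGET_PAGE_SIZE, io);
}
#endif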

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
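
/* Illustrative sketch (not part of the original file): the common way in,
 * via the cpu_physical_memory_read()/write() wrappers around the routine
 * above.  The guest physical address 0x1000 is an arbitrary example.
 */
#if 0
static void phys_rw_usage_example(void)
{
    uint32_t v = 0xdeadbeef;

    cpu_physical_memory_write(0x1000, (uint8_t *)&v, sizeof(v));
    cpu_physical_memory_read(0x1000, (uint8_t *)&v, sizeof(v));
}
#endif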

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
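
/* Illustrative sketch (not part of the original file): a DMA helper that
 * cannot map right now (the single bounce buffer is busy) parks a callback
 * here and retries once cpu_notify_map_clients() fires.  'retry_dma' and
 * 'MyDMAState' are hypothetical.
 */
#if 0
typedef struct MyDMAState {
    target_phys_addr_t addr;
    target_phys_addr_t len;
} MyDMAState;

static void retry_dma(void *opaque)
{
    MyDMAState *s = opaque;
    target_phys_addr_t plen = s->len;
    void *host = cpu_physical_memory_map(s->addr, &plen, 1);

    if (!host) {
        /* Bounce buffer still busy; try again on the next notify. */
        cpu_register_map_client(s, retry_dma);
        return;
    }
    /* ... perform the transfer into 'host' ... */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif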

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_mapcache_enabled()) {
            uint8_t *buffer1 = buffer;
            uint8_t *end_buffer = buffer + len;

            while (buffer1 < end_buffer) {
                qemu_put_ram_ptr(buffer1);
                buffer1 += TARGET_PAGE_SIZE;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
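
/* Illustrative sketch (not part of the original file): the usual
 * map/modify/unmap pattern for zero-copy access to guest RAM.  If less
 * than the requested length comes back, a caller would loop or fall back
 * to cpu_physical_memory_rw().
 */
#if 0
static void map_unmap_usage_example(target_phys_addr_t guest_pa)
{
    target_phys_addr_t plen = 4096;
    uint8_t *host = cpu_physical_memory_map(guest_pa, &plen, 1);

    if (host) {
        memset(host, 0, plen);                       /* write guest RAM */
        cpu_physical_memory_unmap(host, plen, 1, plen);
    }
}
#endif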

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
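
/* Illustrative sketch (not part of the original file): the 64-bit I/O
 * composition above on concrete values.  Two 32-bit reads returning
 * 0x11223344 (low word) and 0x55667788 (high word) combine as follows
 * on a little-endian target.
 */
#if 0
static void ldq_composition_example(void)
{
    uint64_t val;

    val = 0x11223344;                     /* io read at addr     */
    val |= (uint64_t)0x55667788 << 32;    /* io read at addr + 4 */
    assert(val == 0x5566778811223344ULL);
}
#endif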

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
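
/* Sketch of the use case named in the warning above: a target MMU helper
   that sets a status bit in a guest PTE.  Going through stl_phys() would
   mark the page dirty and invalidate translated code there; the _notdirty
   variant performs a plain store, so the dirty bitmap keeps reflecting
   only the guest's own writes.  The function and EXAMPLE_PTE_ACCESSED
   below are hypothetical: */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
}
#endif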

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
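
/* Unlike the _notdirty variant, stl_phys() behaves like real bus-master
   traffic: after storing to RAM it invalidates any translated code
   overlapping the stored word and raises every dirty flag except
   CODE_DIRTY_FLAG, so migration and the TB cache both observe the write.
   Hypothetical usage sketch, a device posting a completion word that the
   guest polls: */
#if 0
static void example_post_completion(target_phys_addr_t status_addr)
{
    stl_phys(status_addr, 1);   /* guest polls this word for completion */
}
#endif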

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
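
/* stq_phys() swaps to target byte order with tswap64() and then does a
   plain byte copy; the "XXX: optimize" note asks for an open-coded fast
   path like the one stl_phys() has.  A sketch of the shape such a path
   could take, mirroring the word order of stq_phys_notdirty()'s MMIO
   branch above (an assumption, not committed code): */
#if 0
void example_stq_phys(target_phys_addr_t addr, uint64_t val)
{
#ifdef TARGET_WORDS_BIGENDIAN
    stl_phys(addr, val >> 32);      /* most-significant word first */
    stl_phys(addr + 4, val);
#else
    stl_phys(addr, val);            /* least-significant word first */
    stl_phys(addr + 4, val >> 32);
#endif
}
#endif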

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
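
/* cpu_memory_rw_debug() is the accessor intended for debugger traffic
   (e.g. the gdb stub): it translates one guest page at a time with
   cpu_get_phys_page_debug(), and writes go through
   cpu_physical_memory_write_rom() so a debugger can plant breakpoints
   even in ROM.  Hypothetical wrapper reading guest-virtual memory into a
   host buffer: */
#if 0
static int example_read_guest_virtual(CPUState *env, target_ulong vaddr,
                                      uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0); /* is_write = 0 */
}
#endif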
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
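
/* Worked example of the icount arithmetic above, assuming
   cpu_restore_state() winds icount_decr back to the faulting insn (which
   is what the second subtraction relies on).  Say tb->icount = 10, the
   counter was 37 before the TB (so 27 after the decrement at TB entry),
   and the I/O insn is the 7th of the block:

       n = 27 + 10 = 37             (counter value before the TB)
       cpu_restore_state(): counter becomes 37 - 6 = 31  (6 insns retired)
       n = 37 - 31 = 6              (insns executed before the I/O insn)
       n++  ->  7                   (include the I/O insn itself)

   The block is then regenerated with cflags = 7 | CF_LAST_IO, so the new
   TB ends exactly on the I/O instruction. */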

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
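
/* dump_exec_info() backs the monitor's "info jit" command in this QEMU
   generation: typing "info jit" at the (qemu) prompt prints the
   translation-buffer fill level, the TB counts and direct-jump statistics
   computed above, followed by the TCG internals from tcg_dump_info(). */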

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
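
/* The SHIFT/#include pairs above are C's repeated-inclusion template
   idiom: softmmu_template.h pastes SHIFT (0..3 selects 8/16/32/64-bit
   access) and MMUSUFFIX into function names, so one body yields the whole
   _cmmu (code-fetch) load/store helper family.  A minimal standalone
   sketch of the idiom, with hypothetical file and macro names:

       -- adder_template.h (deliberately no include guard) --
       #define PASTE2(a, b) a##b
       #define PASTE(a, b)  PASTE2(a, b)
       #define ADD_TYPE     PASTE(PASTE(uint, WIDTH), _t)
       #define ADD_FN       PASTE(add_u, WIDTH)

       static ADD_TYPE ADD_FN(ADD_TYPE a, ADD_TYPE b)
       {
           return a + b;
       }

       #undef ADD_FN
       #undef ADD_TYPE
       #undef PASTE
       #undef PASTE2
       #undef WIDTH

       -- user.c --
       #include <stdint.h>
       #define WIDTH 16
       #include "adder_template.h"   // defines add_u16()
       #define WIDTH 32
       #include "adder_template.h"   // defines add_u32()
*/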