/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do to a given page to decide when to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
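
/*
 * Worked example for the level split above (illustrative only; the
 * concrete numbers assume TARGET_VIRT_ADDR_SPACE_BITS == 47 and
 * TARGET_PAGE_BITS == 12, i.e. a 47-bit user address space with 4K
 * pages):
 *
 *   47 - 12 = 35 page-index bits remain; 35 % 10 == 5 and 5 >= 4, so
 *   V_L1_BITS == 5 and V_L1_SHIFT == 30.  A page index is then consumed
 *   as one 5-bit L1 slot followed by three 10-bit levels:
 *
 *     l1   = (index >> 30) & (V_L1_SIZE - 1);   32-entry top table
 *     l2   = (index >> 20) & (L2_SIZE - 1);
 *     l3   = (index >> 10) & (L2_SIZE - 1);
 *     leaf =  index        & (L2_SIZE - 1);     indexes the PageDesc array
 */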
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
252 b346ff46 bellard
static void page_init(void)
253 54936004 bellard
{
254 83fb7adf bellard
    /* NOTE: we can always suppose that qemu_host_page_size >=
255 54936004 bellard
       TARGET_PAGE_SIZE */
256 c2b48b69 aliguori
#ifdef _WIN32
257 c2b48b69 aliguori
    {
258 c2b48b69 aliguori
        SYSTEM_INFO system_info;
259 c2b48b69 aliguori
260 c2b48b69 aliguori
        GetSystemInfo(&system_info);
261 c2b48b69 aliguori
        qemu_real_host_page_size = system_info.dwPageSize;
262 c2b48b69 aliguori
    }
263 c2b48b69 aliguori
#else
264 c2b48b69 aliguori
    qemu_real_host_page_size = getpagesize();
265 c2b48b69 aliguori
#endif
266 83fb7adf bellard
    if (qemu_host_page_size == 0)
267 83fb7adf bellard
        qemu_host_page_size = qemu_real_host_page_size;
268 83fb7adf bellard
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
269 83fb7adf bellard
        qemu_host_page_size = TARGET_PAGE_SIZE;
270 83fb7adf bellard
    qemu_host_page_bits = 0;
271 83fb7adf bellard
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
272 83fb7adf bellard
        qemu_host_page_bits++;
273 83fb7adf bellard
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
274 50a9569b balrog
275 2e9a5713 Paul Brook
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
276 50a9569b balrog
    {
277 f01576f1 Juergen Lock
#ifdef HAVE_KINFO_GETVMMAP
278 f01576f1 Juergen Lock
        struct kinfo_vmentry *freep;
279 f01576f1 Juergen Lock
        int i, cnt;
280 f01576f1 Juergen Lock
281 f01576f1 Juergen Lock
        freep = kinfo_getvmmap(getpid(), &cnt);
282 f01576f1 Juergen Lock
        if (freep) {
283 f01576f1 Juergen Lock
            mmap_lock();
284 f01576f1 Juergen Lock
            for (i = 0; i < cnt; i++) {
285 f01576f1 Juergen Lock
                unsigned long startaddr, endaddr;
286 f01576f1 Juergen Lock
287 f01576f1 Juergen Lock
                startaddr = freep[i].kve_start;
288 f01576f1 Juergen Lock
                endaddr = freep[i].kve_end;
289 f01576f1 Juergen Lock
                if (h2g_valid(startaddr)) {
290 f01576f1 Juergen Lock
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
291 f01576f1 Juergen Lock
292 f01576f1 Juergen Lock
                    if (h2g_valid(endaddr)) {
293 f01576f1 Juergen Lock
                        endaddr = h2g(endaddr);
294 fd436907 Aurelien Jarno
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
295 f01576f1 Juergen Lock
                    } else {
296 f01576f1 Juergen Lock
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
297 f01576f1 Juergen Lock
                        endaddr = ~0ul;
298 fd436907 Aurelien Jarno
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
299 f01576f1 Juergen Lock
#endif
300 f01576f1 Juergen Lock
                    }
301 f01576f1 Juergen Lock
                }
302 f01576f1 Juergen Lock
            }
303 f01576f1 Juergen Lock
            free(freep);
304 f01576f1 Juergen Lock
            mmap_unlock();
305 f01576f1 Juergen Lock
        }
306 f01576f1 Juergen Lock
#else
307 50a9569b balrog
        FILE *f;
308 50a9569b balrog
309 0776590d pbrook
        last_brk = (unsigned long)sbrk(0);
310 5cd2c5b6 Richard Henderson
311 fd436907 Aurelien Jarno
        f = fopen("/compat/linux/proc/self/maps", "r");
312 50a9569b balrog
        if (f) {
313 5cd2c5b6 Richard Henderson
            mmap_lock();
314 5cd2c5b6 Richard Henderson
315 50a9569b balrog
            do {
316 5cd2c5b6 Richard Henderson
                unsigned long startaddr, endaddr;
317 5cd2c5b6 Richard Henderson
                int n;
318 5cd2c5b6 Richard Henderson
319 5cd2c5b6 Richard Henderson
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
320 5cd2c5b6 Richard Henderson
321 5cd2c5b6 Richard Henderson
                if (n == 2 && h2g_valid(startaddr)) {
322 5cd2c5b6 Richard Henderson
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
323 5cd2c5b6 Richard Henderson
324 5cd2c5b6 Richard Henderson
                    if (h2g_valid(endaddr)) {
325 5cd2c5b6 Richard Henderson
                        endaddr = h2g(endaddr);
326 5cd2c5b6 Richard Henderson
                    } else {
327 5cd2c5b6 Richard Henderson
                        endaddr = ~0ul;
328 5cd2c5b6 Richard Henderson
                    }
329 5cd2c5b6 Richard Henderson
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
330 50a9569b balrog
                }
331 50a9569b balrog
            } while (!feof(f));
332 5cd2c5b6 Richard Henderson
333 50a9569b balrog
            fclose(f);
334 5cd2c5b6 Richard Henderson
            mmap_unlock();
335 50a9569b balrog
        }
336 f01576f1 Juergen Lock
#endif
337 50a9569b balrog
    }
338 50a9569b balrog
#endif
339 54936004 bellard
}
340 54936004 bellard
341 41c1b1c9 Paul Brook
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
342 54936004 bellard
{
343 41c1b1c9 Paul Brook
    PageDesc *pd;
344 41c1b1c9 Paul Brook
    void **lp;
345 41c1b1c9 Paul Brook
    int i;
346 41c1b1c9 Paul Brook
347 5cd2c5b6 Richard Henderson
#if defined(CONFIG_USER_ONLY)
348 2e9a5713 Paul Brook
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
349 5cd2c5b6 Richard Henderson
# define ALLOC(P, SIZE)                                 \
350 5cd2c5b6 Richard Henderson
    do {                                                \
351 5cd2c5b6 Richard Henderson
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
352 5cd2c5b6 Richard Henderson
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
353 5cd2c5b6 Richard Henderson
    } while (0)
354 5cd2c5b6 Richard Henderson
#else
355 5cd2c5b6 Richard Henderson
# define ALLOC(P, SIZE) \
356 5cd2c5b6 Richard Henderson
    do { P = qemu_mallocz(SIZE); } while (0)
357 17e2377a pbrook
#endif
358 434929bf aliguori
359 5cd2c5b6 Richard Henderson
    /* Level 1.  Always allocated.  */
360 5cd2c5b6 Richard Henderson
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
361 5cd2c5b6 Richard Henderson
362 5cd2c5b6 Richard Henderson
    /* Level 2..N-1.  */
363 5cd2c5b6 Richard Henderson
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
364 5cd2c5b6 Richard Henderson
        void **p = *lp;
365 5cd2c5b6 Richard Henderson
366 5cd2c5b6 Richard Henderson
        if (p == NULL) {
367 5cd2c5b6 Richard Henderson
            if (!alloc) {
368 5cd2c5b6 Richard Henderson
                return NULL;
369 5cd2c5b6 Richard Henderson
            }
370 5cd2c5b6 Richard Henderson
            ALLOC(p, sizeof(void *) * L2_SIZE);
371 5cd2c5b6 Richard Henderson
            *lp = p;
372 17e2377a pbrook
        }
373 5cd2c5b6 Richard Henderson
374 5cd2c5b6 Richard Henderson
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
375 5cd2c5b6 Richard Henderson
    }
376 5cd2c5b6 Richard Henderson
377 5cd2c5b6 Richard Henderson
    pd = *lp;
378 5cd2c5b6 Richard Henderson
    if (pd == NULL) {
379 5cd2c5b6 Richard Henderson
        if (!alloc) {
380 5cd2c5b6 Richard Henderson
            return NULL;
381 5cd2c5b6 Richard Henderson
        }
382 5cd2c5b6 Richard Henderson
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
383 5cd2c5b6 Richard Henderson
        *lp = pd;
384 54936004 bellard
    }
385 5cd2c5b6 Richard Henderson
386 5cd2c5b6 Richard Henderson
#undef ALLOC
387 5cd2c5b6 Richard Henderson
388 5cd2c5b6 Richard Henderson
    return pd + (index & (L2_SIZE - 1));
389 54936004 bellard
}
390 54936004 bellard
391 41c1b1c9 Paul Brook
static inline PageDesc *page_find(tb_page_addr_t index)
392 54936004 bellard
{
393 5cd2c5b6 Richard Henderson
    return page_find_alloc(index, 0);
394 fd6ce8f6 bellard
}
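
/*
 * Usage sketch (illustrative only; not part of the original file): the
 * two entry points of the walk above differ only in the alloc flag.
 */
#if 0   /* illustrative, never compiled */
static void example_page_lookup(tb_page_addr_t addr)
{
    /* read-only probe: returns NULL if the page was never populated */
    PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);

    if (p == NULL) {
        /* allocating walk: builds the missing intermediate levels */
        p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
    }
}
#endif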

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
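
/*
 * Note (illustrative): because leaf entries are pre-filled with
 * IO_MEM_UNASSIGNED above, callers can treat a NULL result from
 * phys_page_find() and an unassigned entry the same way, e.g.:
 */
#if 0   /* illustrative, never compiled */
PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
#endif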

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
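
/*
 * Sizing note (illustrative arithmetic): code_gen_buffer_max_size leaves
 * one worst-case TB of headroom, so tb_alloc() can reject a request
 * *before* translation overruns the buffer.  With the default 32 MB
 * buffer:
 *
 *   code_gen_buffer_max_size = 32 MB - TCG_MAX_OP_SIZE * OPC_MAX_SIZE
 *   code_gen_max_blocks      = 32 MB / CODE_GEN_AVG_BLOCK_SIZE
 *
 * and tb_flush() is triggered when either limit is hit.
 */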

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   there are too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
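
/*
 * Pairing sketch (illustrative; roughly how callers outside this file
 * use one-shot TBs -- an assumption about the caller, not shown here):
 * because tb_alloc() always hands out &tbs[nb_tbs - 1] and tb_gen_code()
 * advances code_gen_ptr past the new code, freeing a just-generated TB
 * rewinds both counters completely.
 */
#if 0   /* illustrative, never compiled */
tb = tb_gen_code(env, pc, cs_base, flags, 1); /* cflags count == 1 */
/* ... execute it once ... */
tb_free(tb);   /* reclaims the TB slot and its code space */
#endif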

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
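
/*
 * Tagged-pointer note (illustrative): a TB can span two guest pages, so
 * the page lists store which page_next[] slot a link belongs to in the
 * low two bits of the pointer itself, and tb_page_remove() strips the
 * tag before comparing.  In the jmp_first circular list, the tag value
 * 2 marks the list head (see the "fail safe" assignment in
 * tb_phys_invalidate() below).  A minimal sketch of the encoding:
 */
#if 0   /* illustrative, never compiled */
/* link tb into page slot n (0 or 1) */
p->first_tb = (TranslationBlock *)((long)tb | n);
/* decode a link */
n1  = (long)tb1 & 3;                        /* which slot */
tb1 = (TranslationBlock *)((long)tb1 & ~3); /* real pointer */
#endif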

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
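
/*
 * Worked example (illustrative): set_bits(tab, 5, 7) marks bits 5..11.
 * start (5) and end (12) land in different bytes, so the else branch
 * runs: tab[0] |= 0xff << 5 (= 0xe0, bits 5-7), start rounds up to 8,
 * end1 = 12 & ~7 = 8 so the full-byte loop is skipped, and
 * tab[1] |= ~(0xff << (12 & 7)) (= 0x0f, bits 8-11).
 */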

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
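
/*
 * Cross-page sketch (illustrative): with 4K pages, a TB starting at
 * pc = 0x1ffa with size 0x10 gives
 *   virt_page2 = (0x1ffa + 0x10 - 1) & TARGET_PAGE_MASK = 0x2000,
 * which differs from pc & TARGET_PAGE_MASK = 0x1000, so both physical
 * pages are resolved and the TB is linked into both page lists by
 * tb_link_page().
 */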
1006 3b46e624 ths
1007 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
1008 9fa3e853 bellard
   starting in range [start;end[. NOTE: start and end must refer to
1009 d720b93d bellard
   the same physical page. 'is_cpu_write_access' should be true if called
1010 d720b93d bellard
   from a real cpu write access: the virtual CPU will exit the current
1011 d720b93d bellard
   TB if code is modified inside this TB. */
1012 41c1b1c9 Paul Brook
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1013 d720b93d bellard
                                   int is_cpu_write_access)
1014 d720b93d bellard
{
1015 6b917547 aliguori
    TranslationBlock *tb, *tb_next, *saved_tb;
1016 d720b93d bellard
    CPUState *env = cpu_single_env;
1017 41c1b1c9 Paul Brook
    tb_page_addr_t tb_start, tb_end;
1018 6b917547 aliguori
    PageDesc *p;
1019 6b917547 aliguori
    int n;
1020 6b917547 aliguori
#ifdef TARGET_HAS_PRECISE_SMC
1021 6b917547 aliguori
    int current_tb_not_found = is_cpu_write_access;
1022 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1023 6b917547 aliguori
    int current_tb_modified = 0;
1024 6b917547 aliguori
    target_ulong current_pc = 0;
1025 6b917547 aliguori
    target_ulong current_cs_base = 0;
1026 6b917547 aliguori
    int current_flags = 0;
1027 6b917547 aliguori
#endif /* TARGET_HAS_PRECISE_SMC */
1028 9fa3e853 bellard
1029 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1030 5fafdf24 ths
    if (!p)
1031 9fa3e853 bellard
        return;
1032 5fafdf24 ths
    if (!p->code_bitmap &&
1033 d720b93d bellard
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1034 d720b93d bellard
        is_cpu_write_access) {
1035 9fa3e853 bellard
        /* build code bitmap */
1036 9fa3e853 bellard
        build_page_bitmap(p);
1037 9fa3e853 bellard
    }
1038 9fa3e853 bellard
1039 9fa3e853 bellard
    /* we remove all the TBs in the range [start, end[ */
1040 9fa3e853 bellard
    /* XXX: see if in some cases it could be faster to invalidate all the code */
1041 9fa3e853 bellard
    tb = p->first_tb;
1042 9fa3e853 bellard
    while (tb != NULL) {
1043 9fa3e853 bellard
        n = (long)tb & 3;
1044 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1045 9fa3e853 bellard
        tb_next = tb->page_next[n];
1046 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
1047 9fa3e853 bellard
        if (n == 0) {
1048 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
1049 9fa3e853 bellard
               it is not a problem */
1050 9fa3e853 bellard
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1051 9fa3e853 bellard
            tb_end = tb_start + tb->size;
1052 9fa3e853 bellard
        } else {
1053 9fa3e853 bellard
            tb_start = tb->page_addr[1];
1054 9fa3e853 bellard
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1055 9fa3e853 bellard
        }
1056 9fa3e853 bellard
        if (!(tb_end <= start || tb_start >= end)) {
1057 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1058 d720b93d bellard
            if (current_tb_not_found) {
1059 d720b93d bellard
                current_tb_not_found = 0;
1060 d720b93d bellard
                current_tb = NULL;
1061 2e70f6ef pbrook
                if (env->mem_io_pc) {
1062 d720b93d bellard
                    /* now we have a real cpu fault */
1063 2e70f6ef pbrook
                    current_tb = tb_find_pc(env->mem_io_pc);
1064 d720b93d bellard
                }
1065 d720b93d bellard
            }
1066 d720b93d bellard
            if (current_tb == tb &&
1067 2e70f6ef pbrook
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
1068 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1069 d720b93d bellard
                its execution. We could be more precise by checking
1070 d720b93d bellard
                that the modification is after the current PC, but it
1071 d720b93d bellard
                would require a specialized function to partially
1072 d720b93d bellard
                restore the CPU state */
1073 3b46e624 ths
1074 d720b93d bellard
                current_tb_modified = 1;
1075 618ba8e6 Stefan Weil
                cpu_restore_state(current_tb, env, env->mem_io_pc);
1076 6b917547 aliguori
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1077 6b917547 aliguori
                                     &current_flags);
1078 d720b93d bellard
            }
1079 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1080 6f5a9f7e bellard
            /* save and clear current_tb to handle the case where a signal
1081 6f5a9f7e bellard
               occurs while doing tb_phys_invalidate() */
1082 6f5a9f7e bellard
            saved_tb = NULL;
1083 6f5a9f7e bellard
            if (env) {
1084 6f5a9f7e bellard
                saved_tb = env->current_tb;
1085 6f5a9f7e bellard
                env->current_tb = NULL;
1086 6f5a9f7e bellard
            }
1087 9fa3e853 bellard
            tb_phys_invalidate(tb, -1);
1088 6f5a9f7e bellard
            if (env) {
1089 6f5a9f7e bellard
                env->current_tb = saved_tb;
1090 6f5a9f7e bellard
                if (env->interrupt_request && env->current_tb)
1091 6f5a9f7e bellard
                    cpu_interrupt(env, env->interrupt_request);
1092 6f5a9f7e bellard
            }
1093 9fa3e853 bellard
        }
1094 9fa3e853 bellard
        tb = tb_next;
1095 9fa3e853 bellard
    }
1096 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
1097 9fa3e853 bellard
    /* if no code remains, there is no need to keep using slow writes */
1098 9fa3e853 bellard
    if (!p->first_tb) {
1099 9fa3e853 bellard
        invalidate_page_bitmap(p);
1100 d720b93d bellard
        if (is_cpu_write_access) {
1101 2e70f6ef pbrook
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1102 d720b93d bellard
        }
1103 d720b93d bellard
    }
1104 d720b93d bellard
#endif
1105 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1106 d720b93d bellard
    if (current_tb_modified) {
1107 d720b93d bellard
        /* we generate a block containing just the instruction
1108 d720b93d bellard
           modifying the memory, which ensures that the block cannot modify
1109 d720b93d bellard
           itself */
1110 ea1c1802 bellard
        env->current_tb = NULL;
1111 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1112 d720b93d bellard
        cpu_resume_from_signal(env, NULL);
1113 9fa3e853 bellard
    }
1114 fd6ce8f6 bellard
#endif
1115 9fa3e853 bellard
}
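
/* Illustrative sketch (hypothetical helper, not part of the original
   file): the interval test used in the loop above.  A TB occupying
   [tb_start, tb_end) intersects the written range [start, end) unless
   it lies entirely before or entirely after it. */
static inline int example_tb_overlaps(tb_page_addr_t tb_start,
                                      tb_page_addr_t tb_end,
                                      tb_page_addr_t start,
                                      tb_page_addr_t end)
{
    return !(tb_end <= start || tb_start >= end);
}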
1116 fd6ce8f6 bellard
1117 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1118 41c1b1c9 Paul Brook
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1119 9fa3e853 bellard
{
1120 9fa3e853 bellard
    PageDesc *p;
1121 9fa3e853 bellard
    int offset, b;
1122 59817ccb bellard
#if 0
1123 a4193c8a bellard
    if (1) {
1124 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1125 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1126 93fcfe39 aliguori
                  cpu_single_env->eip,
1127 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1128 59817ccb bellard
    }
1129 59817ccb bellard
#endif
1130 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1131 5fafdf24 ths
    if (!p)
1132 9fa3e853 bellard
        return;
1133 9fa3e853 bellard
    if (p->code_bitmap) {
1134 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1135 9fa3e853 bellard
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1136 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1137 9fa3e853 bellard
            goto do_invalidate;
1138 9fa3e853 bellard
    } else {
1139 9fa3e853 bellard
    do_invalidate:
1140 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1141 9fa3e853 bellard
    }
1142 9fa3e853 bellard
}
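
/* Illustrative sketch (hypothetical helper): the fast path above keeps
   one bit per byte of the page in p->code_bitmap.  Because start is a
   multiple of len and len <= 8, the len bits tested never straddle a
   bitmap byte, so a single shift-and-mask decides whether the write
   touches translated code. */
static inline int example_write_hits_code(const uint8_t *code_bitmap,
                                          int offset, int len)
{
    int b = code_bitmap[offset >> 3] >> (offset & 7);
    return (b & ((1 << len) - 1)) != 0;
}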
1143 9fa3e853 bellard
1144 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1145 41c1b1c9 Paul Brook
static void tb_invalidate_phys_page(tb_page_addr_t addr,
1146 d720b93d bellard
                                    unsigned long pc, void *puc)
1147 9fa3e853 bellard
{
1148 6b917547 aliguori
    TranslationBlock *tb;
1149 9fa3e853 bellard
    PageDesc *p;
1150 6b917547 aliguori
    int n;
1151 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1152 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1153 d720b93d bellard
    CPUState *env = cpu_single_env;
1154 6b917547 aliguori
    int current_tb_modified = 0;
1155 6b917547 aliguori
    target_ulong current_pc = 0;
1156 6b917547 aliguori
    target_ulong current_cs_base = 0;
1157 6b917547 aliguori
    int current_flags = 0;
1158 d720b93d bellard
#endif
1159 9fa3e853 bellard
1160 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1161 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1162 5fafdf24 ths
    if (!p)
1163 9fa3e853 bellard
        return;
1164 9fa3e853 bellard
    tb = p->first_tb;
1165 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1166 d720b93d bellard
    if (tb && pc != 0) {
1167 d720b93d bellard
        current_tb = tb_find_pc(pc);
1168 d720b93d bellard
    }
1169 d720b93d bellard
#endif
1170 9fa3e853 bellard
    while (tb != NULL) {
1171 9fa3e853 bellard
        n = (long)tb & 3;
1172 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1173 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1174 d720b93d bellard
        if (current_tb == tb &&
1175 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1176 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1177 d720b93d bellard
                   its execution. We could be more precise by checking
1178 d720b93d bellard
                   that the modification is after the current PC, but it
1179 d720b93d bellard
                   would require a specialized function to partially
1180 d720b93d bellard
                   restore the CPU state */
1181 3b46e624 ths
1182 d720b93d bellard
            current_tb_modified = 1;
1183 618ba8e6 Stefan Weil
            cpu_restore_state(current_tb, env, pc);
1184 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1185 6b917547 aliguori
                                 &current_flags);
1186 d720b93d bellard
        }
1187 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1188 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1189 9fa3e853 bellard
        tb = tb->page_next[n];
1190 9fa3e853 bellard
    }
1191 fd6ce8f6 bellard
    p->first_tb = NULL;
1192 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1193 d720b93d bellard
    if (current_tb_modified) {
1194 d720b93d bellard
        /* we generate a block containing just the instruction
1195 d720b93d bellard
           modifying the memory, which ensures that the block cannot modify
1196 d720b93d bellard
           itself */
1197 ea1c1802 bellard
        env->current_tb = NULL;
1198 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1199 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1200 d720b93d bellard
    }
1201 d720b93d bellard
#endif
1202 fd6ce8f6 bellard
}
1203 9fa3e853 bellard
#endif
1204 fd6ce8f6 bellard
1205 fd6ce8f6 bellard
/* add the tb to the target page and protect it if necessary */
1206 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1207 41c1b1c9 Paul Brook
                                 unsigned int n, tb_page_addr_t page_addr)
1208 fd6ce8f6 bellard
{
1209 fd6ce8f6 bellard
    PageDesc *p;
1210 4429ab44 Juan Quintela
#ifndef CONFIG_USER_ONLY
1211 4429ab44 Juan Quintela
    bool page_already_protected;
1212 4429ab44 Juan Quintela
#endif
1213 9fa3e853 bellard
1214 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1215 5cd2c5b6 Richard Henderson
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1216 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1217 4429ab44 Juan Quintela
#ifndef CONFIG_USER_ONLY
1218 4429ab44 Juan Quintela
    page_already_protected = p->first_tb != NULL;
1219 4429ab44 Juan Quintela
#endif
1220 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1221 9fa3e853 bellard
    invalidate_page_bitmap(p);
1222 fd6ce8f6 bellard
1223 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1224 d720b93d bellard
1225 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1226 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1227 53a5960a pbrook
        target_ulong addr;
1228 53a5960a pbrook
        PageDesc *p2;
1229 9fa3e853 bellard
        int prot;
1230 9fa3e853 bellard
1231 fd6ce8f6 bellard
    /* force the host page to be non-writable (writes will have a
1232 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1233 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1234 fd6ce8f6 bellard
        prot = 0;
1235 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1236 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1237 53a5960a pbrook
1238 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1239 53a5960a pbrook
            if (!p2)
1240 53a5960a pbrook
                continue;
1241 53a5960a pbrook
            prot |= p2->flags;
1242 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1243 53a5960a pbrook
        }
1244 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1245 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1246 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1247 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1248 53a5960a pbrook
               page_addr);
1249 fd6ce8f6 bellard
#endif
1250 fd6ce8f6 bellard
    }
1251 9fa3e853 bellard
#else
1252 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1253 9fa3e853 bellard
       protected, so we only need to act when the first TB is
1254 9fa3e853 bellard
       allocated in a physical page */
1255 4429ab44 Juan Quintela
    if (!page_already_protected) {
1256 6a00d601 bellard
        tlb_protect_code(page_addr);
1257 9fa3e853 bellard
    }
1258 9fa3e853 bellard
#endif
1259 d720b93d bellard
1260 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1261 fd6ce8f6 bellard
}
1262 fd6ce8f6 bellard
1263 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1264 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1265 41c1b1c9 Paul Brook
void tb_link_page(TranslationBlock *tb,
1266 41c1b1c9 Paul Brook
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1267 d4e8164f bellard
{
1268 9fa3e853 bellard
    unsigned int h;
1269 9fa3e853 bellard
    TranslationBlock **ptb;
1270 9fa3e853 bellard
1271 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1272 c8a706fe pbrook
       before we are done.  */
1273 c8a706fe pbrook
    mmap_lock();
1274 9fa3e853 bellard
    /* add in the physical hash table */
1275 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1276 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1277 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1278 9fa3e853 bellard
    *ptb = tb;
1279 fd6ce8f6 bellard
1280 fd6ce8f6 bellard
    /* add in the page list */
1281 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1282 9fa3e853 bellard
    if (phys_page2 != -1)
1283 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1284 9fa3e853 bellard
    else
1285 9fa3e853 bellard
        tb->page_addr[1] = -1;
1286 9fa3e853 bellard
1287 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1288 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1289 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1290 d4e8164f bellard
1291 d4e8164f bellard
    /* init original jump addresses */
1292 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1293 d4e8164f bellard
        tb_reset_jump(tb, 0);
1294 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1295 d4e8164f bellard
        tb_reset_jump(tb, 1);
1296 8a40a180 bellard
1297 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1298 8a40a180 bellard
    tb_page_check();
1299 8a40a180 bellard
#endif
1300 c8a706fe pbrook
    mmap_unlock();
1301 fd6ce8f6 bellard
}
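
/* Illustrative sketch (hypothetical helper): the page lists used above
   tag each TranslationBlock pointer with an index in its two low bits
   (0 or 1 = which of the TB's pages the link belongs to, 2 = list head
   in jmp_first).  TB pointers are at least 4-byte aligned, so those
   bits are otherwise zero. */
static inline TranslationBlock *example_untag_tb(TranslationBlock *tagged,
                                                 int *slot)
{
    *slot = (long)tagged & 3;
    return (TranslationBlock *)((long)tagged & ~3);
}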
1302 fd6ce8f6 bellard
1303 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1304 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1305 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1306 fd6ce8f6 bellard
{
1307 9fa3e853 bellard
    int m_min, m_max, m;
1308 9fa3e853 bellard
    unsigned long v;
1309 9fa3e853 bellard
    TranslationBlock *tb;
1310 a513fe19 bellard
1311 a513fe19 bellard
    if (nb_tbs <= 0)
1312 a513fe19 bellard
        return NULL;
1313 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1314 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1315 a513fe19 bellard
        return NULL;
1316 a513fe19 bellard
    /* binary search (cf Knuth) */
1317 a513fe19 bellard
    m_min = 0;
1318 a513fe19 bellard
    m_max = nb_tbs - 1;
1319 a513fe19 bellard
    while (m_min <= m_max) {
1320 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1321 a513fe19 bellard
        tb = &tbs[m];
1322 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1323 a513fe19 bellard
        if (v == tc_ptr)
1324 a513fe19 bellard
            return tb;
1325 a513fe19 bellard
        else if (tc_ptr < v) {
1326 a513fe19 bellard
            m_max = m - 1;
1327 a513fe19 bellard
        } else {
1328 a513fe19 bellard
            m_min = m + 1;
1329 a513fe19 bellard
        }
1330 5fafdf24 ths
    }
1331 a513fe19 bellard
    return &tbs[m_max];
1332 a513fe19 bellard
}
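
/* Usage sketch (hypothetical): map a host PC sampled inside generated
   code, e.g. from a signal handler, back to its TB.  This is how the
   precise-SMC handling above recovers current_tb from env->mem_io_pc. */
static TranslationBlock *example_tb_for_host_pc(unsigned long host_pc)
{
    /* the TB with the largest tc_ptr <= host_pc, or NULL if host_pc
       lies outside the code generation buffer */
    return tb_find_pc(host_pc);
}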
1333 7501267e bellard
1334 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1335 ea041c0e bellard
1336 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1337 ea041c0e bellard
{
1338 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1339 ea041c0e bellard
    unsigned int n1;
1340 ea041c0e bellard
1341 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1342 ea041c0e bellard
    if (tb1 != NULL) {
1343 ea041c0e bellard
        /* find head of list */
1344 ea041c0e bellard
        for(;;) {
1345 ea041c0e bellard
            n1 = (long)tb1 & 3;
1346 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1347 ea041c0e bellard
            if (n1 == 2)
1348 ea041c0e bellard
                break;
1349 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1350 ea041c0e bellard
        }
1351 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1352 ea041c0e bellard
        tb_next = tb1;
1353 ea041c0e bellard
1354 ea041c0e bellard
        /* remove tb from the jmp_first list */
1355 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1356 ea041c0e bellard
        for(;;) {
1357 ea041c0e bellard
            tb1 = *ptb;
1358 ea041c0e bellard
            n1 = (long)tb1 & 3;
1359 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1360 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1361 ea041c0e bellard
                break;
1362 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1363 ea041c0e bellard
        }
1364 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1365 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1366 3b46e624 ths
1367 ea041c0e bellard
        /* suppress the jump to the next tb in the generated code */
1368 ea041c0e bellard
        tb_reset_jump(tb, n);
1369 ea041c0e bellard
1370 0124311e bellard
        /* suppress jumps in the tb we could have jumped to */
1371 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1372 ea041c0e bellard
    }
1373 ea041c0e bellard
}
1374 ea041c0e bellard
1375 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1376 ea041c0e bellard
{
1377 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1378 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1379 ea041c0e bellard
}
1380 ea041c0e bellard
1381 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1382 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1383 94df27fd Paul Brook
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1384 94df27fd Paul Brook
{
1385 94df27fd Paul Brook
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1386 94df27fd Paul Brook
}
1387 94df27fd Paul Brook
#else
1388 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1389 d720b93d bellard
{
1390 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1391 9b3c35e0 j_mayer
    target_ulong pd;
1392 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1393 c2f07f81 pbrook
    PhysPageDesc *p;
1394 d720b93d bellard
1395 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1396 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1397 c2f07f81 pbrook
    if (!p) {
1398 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1399 c2f07f81 pbrook
    } else {
1400 c2f07f81 pbrook
        pd = p->phys_offset;
1401 c2f07f81 pbrook
    }
1402 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1403 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1404 d720b93d bellard
}
1405 c27004ec bellard
#endif
1406 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1407 d720b93d bellard
1408 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
1409 c527ee8f Paul Brook
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1410 c527ee8f Paul Brook
1411 c527ee8f Paul Brook
{
1412 c527ee8f Paul Brook
}
1413 c527ee8f Paul Brook
1414 c527ee8f Paul Brook
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1415 c527ee8f Paul Brook
                          int flags, CPUWatchpoint **watchpoint)
1416 c527ee8f Paul Brook
{
1417 c527ee8f Paul Brook
    return -ENOSYS;
1418 c527ee8f Paul Brook
}
1419 c527ee8f Paul Brook
#else
1420 6658ffb8 pbrook
/* Add a watchpoint.  */
1421 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1422 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1423 6658ffb8 pbrook
{
1424 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1425 c0ce998e aliguori
    CPUWatchpoint *wp;
1426 6658ffb8 pbrook
1427 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1428 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1429 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1430 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1431 b4051334 aliguori
        return -EINVAL;
1432 b4051334 aliguori
    }
1433 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1434 a1d1bb31 aliguori
1435 a1d1bb31 aliguori
    wp->vaddr = addr;
1436 b4051334 aliguori
    wp->len_mask = len_mask;
1437 a1d1bb31 aliguori
    wp->flags = flags;
1438 a1d1bb31 aliguori
1439 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1440 c0ce998e aliguori
    if (flags & BP_GDB)
1441 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1442 c0ce998e aliguori
    else
1443 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1444 6658ffb8 pbrook
1445 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1446 a1d1bb31 aliguori
1447 a1d1bb31 aliguori
    if (watchpoint)
1448 a1d1bb31 aliguori
        *watchpoint = wp;
1449 a1d1bb31 aliguori
    return 0;
1450 6658ffb8 pbrook
}
1451 6658ffb8 pbrook
1452 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1453 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1454 a1d1bb31 aliguori
                          int flags)
1455 6658ffb8 pbrook
{
1456 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1457 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1458 6658ffb8 pbrook
1459 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1460 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1461 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1462 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1463 6658ffb8 pbrook
            return 0;
1464 6658ffb8 pbrook
        }
1465 6658ffb8 pbrook
    }
1466 a1d1bb31 aliguori
    return -ENOENT;
1467 6658ffb8 pbrook
}
1468 6658ffb8 pbrook
1469 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1470 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1471 a1d1bb31 aliguori
{
1472 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1473 7d03f82f edgar_igl
1474 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1475 a1d1bb31 aliguori
1476 a1d1bb31 aliguori
    qemu_free(watchpoint);
1477 a1d1bb31 aliguori
}
1478 a1d1bb31 aliguori
1479 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1480 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1481 a1d1bb31 aliguori
{
1482 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1483 a1d1bb31 aliguori
1484 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1485 a1d1bb31 aliguori
        if (wp->flags & mask)
1486 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1487 c0ce998e aliguori
    }
1488 7d03f82f edgar_igl
}
1489 c527ee8f Paul Brook
#endif
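
#if !defined(CONFIG_USER_ONLY)
/* Usage sketch (hypothetical debugger stub): insert a 4-byte GDB
   watchpoint and remove it again.  The length must be a power of two
   and the address aligned to it, per the sanity check above. */
static int example_set_gdb_watchpoint(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;
    int ret = cpu_watchpoint_insert(env, addr, 4, BP_GDB, &wp);
    if (ret < 0) {
        return ret;               /* e.g. -EINVAL for a misaligned addr */
    }
    return cpu_watchpoint_remove(env, addr, 4, BP_GDB);
}
#endif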
1490 7d03f82f edgar_igl
1491 a1d1bb31 aliguori
/* Add a breakpoint.  */
1492 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1493 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1494 4c3a88a2 bellard
{
1495 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1496 c0ce998e aliguori
    CPUBreakpoint *bp;
1497 3b46e624 ths
1498 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1499 4c3a88a2 bellard
1500 a1d1bb31 aliguori
    bp->pc = pc;
1501 a1d1bb31 aliguori
    bp->flags = flags;
1502 a1d1bb31 aliguori
1503 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1504 c0ce998e aliguori
    if (flags & BP_GDB)
1505 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1506 c0ce998e aliguori
    else
1507 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1508 3b46e624 ths
1509 d720b93d bellard
    breakpoint_invalidate(env, pc);
1510 a1d1bb31 aliguori
1511 a1d1bb31 aliguori
    if (breakpoint)
1512 a1d1bb31 aliguori
        *breakpoint = bp;
1513 4c3a88a2 bellard
    return 0;
1514 4c3a88a2 bellard
#else
1515 a1d1bb31 aliguori
    return -ENOSYS;
1516 4c3a88a2 bellard
#endif
1517 4c3a88a2 bellard
}
1518 4c3a88a2 bellard
1519 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1520 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1521 a1d1bb31 aliguori
{
1522 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1523 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1524 a1d1bb31 aliguori
1525 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1526 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1527 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1528 a1d1bb31 aliguori
            return 0;
1529 a1d1bb31 aliguori
        }
1530 7d03f82f edgar_igl
    }
1531 a1d1bb31 aliguori
    return -ENOENT;
1532 a1d1bb31 aliguori
#else
1533 a1d1bb31 aliguori
    return -ENOSYS;
1534 7d03f82f edgar_igl
#endif
1535 7d03f82f edgar_igl
}
1536 7d03f82f edgar_igl
1537 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1538 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1539 4c3a88a2 bellard
{
1540 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1541 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1542 d720b93d bellard
1543 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1544 a1d1bb31 aliguori
1545 a1d1bb31 aliguori
    qemu_free(breakpoint);
1546 a1d1bb31 aliguori
#endif
1547 a1d1bb31 aliguori
}
1548 a1d1bb31 aliguori
1549 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1550 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1551 a1d1bb31 aliguori
{
1552 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1553 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1554 a1d1bb31 aliguori
1555 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1556 a1d1bb31 aliguori
        if (bp->flags & mask)
1557 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1558 c0ce998e aliguori
    }
1559 4c3a88a2 bellard
#endif
1560 4c3a88a2 bellard
}
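
/* Usage sketch (hypothetical): plant a GDB breakpoint at a guest PC,
   then clear all GDB-injected breakpoints.  On targets without
   TARGET_HAS_ICE the insert returns -ENOSYS. */
static int example_set_gdb_breakpoint(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;
    int ret = cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
    if (ret < 0) {
        return ret;
    }
    cpu_breakpoint_remove_all(env, BP_GDB);
    return 0;
}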
1561 4c3a88a2 bellard
1562 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1563 c33a346e bellard
   CPU loop after each instruction */
1564 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1565 c33a346e bellard
{
1566 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1567 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1568 c33a346e bellard
        env->singlestep_enabled = enabled;
1569 e22a25c9 aliguori
        if (kvm_enabled())
1570 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1571 e22a25c9 aliguori
        else {
1572 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1573 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1574 e22a25c9 aliguori
            tb_flush(env);
1575 e22a25c9 aliguori
        }
1576 c33a346e bellard
    }
1577 c33a346e bellard
#endif
1578 c33a346e bellard
}
1579 c33a346e bellard
1580 34865134 bellard
/* enable or disable low levels log */
1581 34865134 bellard
void cpu_set_log(int log_flags)
1582 34865134 bellard
{
1583 34865134 bellard
    loglevel = log_flags;
1584 34865134 bellard
    if (loglevel && !logfile) {
1585 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1586 34865134 bellard
        if (!logfile) {
1587 34865134 bellard
            perror(logfilename);
1588 34865134 bellard
            _exit(1);
1589 34865134 bellard
        }
1590 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1591 9fa3e853 bellard
        /* avoid glibc's use of mmap() by installing a buffer "by hand" */
1592 9fa3e853 bellard
        {
1593 b55266b5 blueswir1
            static char logfile_buf[4096];
1594 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1595 9fa3e853 bellard
        }
1596 bf65f53f Filip Navara
#elif !defined(_WIN32)
1597 bf65f53f Filip Navara
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1598 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1599 9fa3e853 bellard
#endif
1600 e735b91c pbrook
        log_append = 1;
1601 e735b91c pbrook
    }
1602 e735b91c pbrook
    if (!loglevel && logfile) {
1603 e735b91c pbrook
        fclose(logfile);
1604 e735b91c pbrook
        logfile = NULL;
1605 34865134 bellard
    }
1606 34865134 bellard
}
1607 34865134 bellard
1608 34865134 bellard
void cpu_set_log_filename(const char *filename)
1609 34865134 bellard
{
1610 34865134 bellard
    logfilename = strdup(filename);
1611 e735b91c pbrook
    if (logfile) {
1612 e735b91c pbrook
        fclose(logfile);
1613 e735b91c pbrook
        logfile = NULL;
1614 e735b91c pbrook
    }
1615 e735b91c pbrook
    cpu_set_log(loglevel);
1616 34865134 bellard
}
1617 c33a346e bellard
1618 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1619 ea041c0e bellard
{
1620 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1621 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1622 3098dba0 aurel32
       emulation this often isn't as bad as it sounds, since signals
1623 3098dba0 aurel32
       are mostly used to interrupt blocking syscalls.  */
1624 ea041c0e bellard
    TranslationBlock *tb;
1625 c227f099 Anthony Liguori
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1626 59817ccb bellard
1627 cab1b4bd Riku Voipio
    spin_lock(&interrupt_lock);
1628 3098dba0 aurel32
    tb = env->current_tb;
1629 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1630 3098dba0 aurel32
       all the potentially executing TBs */
1631 f76cfe56 Riku Voipio
    if (tb) {
1632 3098dba0 aurel32
        env->current_tb = NULL;
1633 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1634 be214e6c aurel32
    }
1635 cab1b4bd Riku Voipio
    spin_unlock(&interrupt_lock);
1636 3098dba0 aurel32
}
1637 3098dba0 aurel32
1638 97ffbd8d Jan Kiszka
#ifndef CONFIG_USER_ONLY
1639 3098dba0 aurel32
/* mask must never be zero, except for the A20 change call */
1640 ec6959d0 Jan Kiszka
static void tcg_handle_interrupt(CPUState *env, int mask)
1641 3098dba0 aurel32
{
1642 3098dba0 aurel32
    int old_mask;
1643 be214e6c aurel32
1644 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1645 68a79315 bellard
    env->interrupt_request |= mask;
1646 3098dba0 aurel32
1647 8edac960 aliguori
    /*
1648 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1649 8edac960 aliguori
     * case it's halted.
1650 8edac960 aliguori
     */
1651 b7680cb6 Jan Kiszka
    if (!qemu_cpu_is_self(env)) {
1652 8edac960 aliguori
        qemu_cpu_kick(env);
1653 8edac960 aliguori
        return;
1654 8edac960 aliguori
    }
1655 8edac960 aliguori
1656 2e70f6ef pbrook
    if (use_icount) {
1657 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1658 2e70f6ef pbrook
        if (!can_do_io(env)
1659 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1660 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1661 2e70f6ef pbrook
        }
1662 2e70f6ef pbrook
    } else {
1663 3098dba0 aurel32
        cpu_unlink_tb(env);
1664 ea041c0e bellard
    }
1665 ea041c0e bellard
}
1666 ea041c0e bellard
1667 ec6959d0 Jan Kiszka
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1668 ec6959d0 Jan Kiszka
1669 97ffbd8d Jan Kiszka
#else /* CONFIG_USER_ONLY */
1670 97ffbd8d Jan Kiszka
1671 97ffbd8d Jan Kiszka
void cpu_interrupt(CPUState *env, int mask)
1672 97ffbd8d Jan Kiszka
{
1673 97ffbd8d Jan Kiszka
    env->interrupt_request |= mask;
1674 97ffbd8d Jan Kiszka
    cpu_unlink_tb(env);
1675 97ffbd8d Jan Kiszka
}
1676 97ffbd8d Jan Kiszka
#endif /* CONFIG_USER_ONLY */
1677 97ffbd8d Jan Kiszka
1678 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1679 b54ad049 bellard
{
1680 b54ad049 bellard
    env->interrupt_request &= ~mask;
1681 b54ad049 bellard
}
1682 b54ad049 bellard
1683 3098dba0 aurel32
void cpu_exit(CPUState *env)
1684 3098dba0 aurel32
{
1685 3098dba0 aurel32
    env->exit_request = 1;
1686 3098dba0 aurel32
    cpu_unlink_tb(env);
1687 3098dba0 aurel32
}
1688 3098dba0 aurel32
1689 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1690 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1691 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1692 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1693 f193c797 bellard
      "show target assembly code for each compiled TB" },
1694 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1695 57fec1fe bellard
      "show micro ops for each compiled TB" },
1696 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1697 e01a1157 blueswir1
      "show micro ops "
1698 e01a1157 blueswir1
#ifdef TARGET_I386
1699 e01a1157 blueswir1
      "before eflags optimization and "
1700 f193c797 bellard
#endif
1701 e01a1157 blueswir1
      "after liveness analysis" },
1702 f193c797 bellard
    { CPU_LOG_INT, "int",
1703 f193c797 bellard
      "show interrupts/exceptions in short format" },
1704 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1705 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1706 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1707 e91c8a77 ths
      "show CPU state before block translation" },
1708 f193c797 bellard
#ifdef TARGET_I386
1709 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1710 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1711 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1712 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1713 f193c797 bellard
#endif
1714 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1715 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1716 fd872598 bellard
      "show all i/o ports accesses" },
1717 8e3a9fd2 bellard
#endif
1718 f193c797 bellard
    { 0, NULL, NULL },
1719 f193c797 bellard
};
1720 f193c797 bellard
1721 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1722 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1723 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1724 f6f3fbca Michael S. Tsirkin
1725 f6f3fbca Michael S. Tsirkin
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1726 9742bf26 Yoshiaki Tamura
                                  ram_addr_t size,
1727 0fd542fb Michael S. Tsirkin
                                  ram_addr_t phys_offset,
1728 0fd542fb Michael S. Tsirkin
                                  bool log_dirty)
1729 f6f3fbca Michael S. Tsirkin
{
1730 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1731 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1732 0fd542fb Michael S. Tsirkin
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
1733 f6f3fbca Michael S. Tsirkin
    }
1734 f6f3fbca Michael S. Tsirkin
}
1735 f6f3fbca Michael S. Tsirkin
1736 f6f3fbca Michael S. Tsirkin
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1737 9742bf26 Yoshiaki Tamura
                                        target_phys_addr_t end)
1738 f6f3fbca Michael S. Tsirkin
{
1739 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1740 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1741 f6f3fbca Michael S. Tsirkin
        int r = client->sync_dirty_bitmap(client, start, end);
1742 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1743 f6f3fbca Michael S. Tsirkin
            return r;
1744 f6f3fbca Michael S. Tsirkin
    }
1745 f6f3fbca Michael S. Tsirkin
    return 0;
1746 f6f3fbca Michael S. Tsirkin
}
1747 f6f3fbca Michael S. Tsirkin
1748 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1749 f6f3fbca Michael S. Tsirkin
{
1750 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1751 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1752 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1753 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1754 f6f3fbca Michael S. Tsirkin
            return r;
1755 f6f3fbca Michael S. Tsirkin
    }
1756 f6f3fbca Michael S. Tsirkin
    return 0;
1757 f6f3fbca Michael S. Tsirkin
}
1758 f6f3fbca Michael S. Tsirkin
1759 2173a75f Alex Williamson
struct last_map {
1760 2173a75f Alex Williamson
    target_phys_addr_t start_addr;
1761 2173a75f Alex Williamson
    ram_addr_t size;
1762 2173a75f Alex Williamson
    ram_addr_t phys_offset;
1763 2173a75f Alex Williamson
};
1764 2173a75f Alex Williamson
1765 8d4c78e7 Alex Williamson
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1766 8d4c78e7 Alex Williamson
 * address.  Each intermediate table provides the next L2_BITs of guest
1767 8d4c78e7 Alex Williamson
 * physical address space.  The number of levels varies with the host and
1768 8d4c78e7 Alex Williamson
 * guest configuration, making it efficient to build the final guest
1769 8d4c78e7 Alex Williamson
 * physical address by seeding the L1 offset and shifting and adding in
1770 8d4c78e7 Alex Williamson
 * each L2 offset as we recurse through them. */
1771 2173a75f Alex Williamson
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1772 2173a75f Alex Williamson
                                 void **lp, target_phys_addr_t addr,
1773 2173a75f Alex Williamson
                                 struct last_map *map)
1774 f6f3fbca Michael S. Tsirkin
{
1775 5cd2c5b6 Richard Henderson
    int i;
1776 f6f3fbca Michael S. Tsirkin
1777 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
1778 5cd2c5b6 Richard Henderson
        return;
1779 5cd2c5b6 Richard Henderson
    }
1780 5cd2c5b6 Richard Henderson
    if (level == 0) {
1781 5cd2c5b6 Richard Henderson
        PhysPageDesc *pd = *lp;
1782 8d4c78e7 Alex Williamson
        addr <<= L2_BITS + TARGET_PAGE_BITS;
1783 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
1784 5cd2c5b6 Richard Henderson
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1785 2173a75f Alex Williamson
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1786 2173a75f Alex Williamson
1787 2173a75f Alex Williamson
                if (map->size &&
1788 2173a75f Alex Williamson
                    start_addr == map->start_addr + map->size &&
1789 2173a75f Alex Williamson
                    pd[i].phys_offset == map->phys_offset + map->size) {
1790 2173a75f Alex Williamson
1791 2173a75f Alex Williamson
                    map->size += TARGET_PAGE_SIZE;
1792 2173a75f Alex Williamson
                    continue;
1793 2173a75f Alex Williamson
                } else if (map->size) {
1794 2173a75f Alex Williamson
                    client->set_memory(client, map->start_addr,
1795 2173a75f Alex Williamson
                                       map->size, map->phys_offset, false);
1796 2173a75f Alex Williamson
                }
1797 2173a75f Alex Williamson
1798 2173a75f Alex Williamson
                map->start_addr = start_addr;
1799 2173a75f Alex Williamson
                map->size = TARGET_PAGE_SIZE;
1800 2173a75f Alex Williamson
                map->phys_offset = pd[i].phys_offset;
1801 f6f3fbca Michael S. Tsirkin
            }
1802 5cd2c5b6 Richard Henderson
        }
1803 5cd2c5b6 Richard Henderson
    } else {
1804 5cd2c5b6 Richard Henderson
        void **pp = *lp;
1805 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
1806 8d4c78e7 Alex Williamson
            phys_page_for_each_1(client, level - 1, pp + i,
1807 2173a75f Alex Williamson
                                 (addr << L2_BITS) | i, map);
1808 f6f3fbca Michael S. Tsirkin
        }
1809 f6f3fbca Michael S. Tsirkin
    }
1810 f6f3fbca Michael S. Tsirkin
}
1811 f6f3fbca Michael S. Tsirkin
1812 f6f3fbca Michael S. Tsirkin
static void phys_page_for_each(CPUPhysMemoryClient *client)
1813 f6f3fbca Michael S. Tsirkin
{
1814 5cd2c5b6 Richard Henderson
    int i;
1815 2173a75f Alex Williamson
    struct last_map map = { };
1816 2173a75f Alex Williamson
1817 5cd2c5b6 Richard Henderson
    for (i = 0; i < P_L1_SIZE; ++i) {
1818 5cd2c5b6 Richard Henderson
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1819 2173a75f Alex Williamson
                             l1_phys_map + i, i, &map);
1820 2173a75f Alex Williamson
    }
1821 2173a75f Alex Williamson
    if (map.size) {
1822 2173a75f Alex Williamson
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1823 2173a75f Alex Williamson
                           false);
1824 f6f3fbca Michael S. Tsirkin
    }
1825 f6f3fbca Michael S. Tsirkin
}
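
/* Illustrative sketch (hypothetical helper): the walk above coalesces
   pages whose guest addresses and phys_offsets are both contiguous
   into a single set_memory callback instead of one call per
   TARGET_PAGE_SIZE page. */
static void example_coalesce(struct last_map *map,
                             target_phys_addr_t start_addr,
                             ram_addr_t phys_offset)
{
    if (map->size &&
        start_addr == map->start_addr + map->size &&
        phys_offset == map->phys_offset + map->size) {
        map->size += TARGET_PAGE_SIZE;    /* page extends the run */
    } else {
        /* a real caller would flush the previous run to the client here */
        map->start_addr = start_addr;
        map->size = TARGET_PAGE_SIZE;
        map->phys_offset = phys_offset;
    }
}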
1826 f6f3fbca Michael S. Tsirkin
1827 f6f3fbca Michael S. Tsirkin
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1828 f6f3fbca Michael S. Tsirkin
{
1829 f6f3fbca Michael S. Tsirkin
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
1830 f6f3fbca Michael S. Tsirkin
    phys_page_for_each(client);
1831 f6f3fbca Michael S. Tsirkin
}
1832 f6f3fbca Michael S. Tsirkin
1833 f6f3fbca Michael S. Tsirkin
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1834 f6f3fbca Michael S. Tsirkin
{
1835 f6f3fbca Michael S. Tsirkin
    QLIST_REMOVE(client, list);
1836 f6f3fbca Michael S. Tsirkin
}
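
/* Usage sketch (hypothetical client, assuming the CPUPhysMemoryClient
   layout implied by the callers above): set_memory, sync_dirty_bitmap
   and migration_log are invoked unconditionally, so all three must be
   provided; log_start/log_stop are checked for NULL before use. */
static void example_client_set_memory(CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      bool log_dirty)
{
    /* e.g. mirror the mapping into a hypervisor interface */
}

static int example_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                            target_phys_addr_t start,
                                            target_phys_addr_t end)
{
    return 0;                   /* nothing to sync in this sketch */
}

static int example_client_migration_log(CPUPhysMemoryClient *client,
                                        int enable)
{
    return 0;                   /* dirty logging not implemented here */
}

static CPUPhysMemoryClient example_client = {
    .set_memory = example_client_set_memory,
    .sync_dirty_bitmap = example_client_sync_dirty_bitmap,
    .migration_log = example_client_migration_log,
};
/* registering replays every existing mapping through set_memory:
   cpu_register_phys_memory_client(&example_client); */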
1837 f6f3fbca Michael S. Tsirkin
#endif
1838 f6f3fbca Michael S. Tsirkin
1839 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1840 f193c797 bellard
{
1841 f193c797 bellard
    if (strlen(s2) != n)
1842 f193c797 bellard
        return 0;
1843 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1844 f193c797 bellard
}
1845 3b46e624 ths
1846 f193c797 bellard
/* Takes a comma-separated list of log masks. Returns 0 on error. */
1847 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1848 f193c797 bellard
{
1849 c7cd6a37 blueswir1
    const CPULogItem *item;
1850 f193c797 bellard
    int mask;
1851 f193c797 bellard
    const char *p, *p1;
1852 f193c797 bellard
1853 f193c797 bellard
    p = str;
1854 f193c797 bellard
    mask = 0;
1855 f193c797 bellard
    for(;;) {
1856 f193c797 bellard
        p1 = strchr(p, ',');
1857 f193c797 bellard
        if (!p1)
1858 f193c797 bellard
            p1 = p + strlen(p);
1859 9742bf26 Yoshiaki Tamura
        if (cmp1(p, p1 - p, "all")) {
1860 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1861 9742bf26 Yoshiaki Tamura
                mask |= item->mask;
1862 9742bf26 Yoshiaki Tamura
            }
1863 9742bf26 Yoshiaki Tamura
        } else {
1864 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1865 9742bf26 Yoshiaki Tamura
                if (cmp1(p, p1 - p, item->name))
1866 9742bf26 Yoshiaki Tamura
                    goto found;
1867 9742bf26 Yoshiaki Tamura
            }
1868 9742bf26 Yoshiaki Tamura
            return 0;
1869 f193c797 bellard
        }
1870 f193c797 bellard
    found:
1871 f193c797 bellard
        mask |= item->mask;
1872 f193c797 bellard
        if (*p1 != ',')
1873 f193c797 bellard
            break;
1874 f193c797 bellard
        p = p1 + 1;
1875 f193c797 bellard
    }
1876 f193c797 bellard
    return mask;
1877 f193c797 bellard
}
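
/* Usage sketch (hypothetical): enable log categories from a
   user-supplied string, e.g. the argument of a "-d in_asm,exec"
   style option. */
static int example_enable_logging(const char *opt)
{
    int mask = cpu_str_to_log_mask(opt);
    if (mask == 0) {
        return -1;              /* unknown log item name in the list */
    }
    cpu_set_log(mask);          /* opens the log file on first use */
    return 0;
}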
1878 ea041c0e bellard
1879 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1880 7501267e bellard
{
1881 7501267e bellard
    va_list ap;
1882 493ae1f0 pbrook
    va_list ap2;
1883 7501267e bellard
1884 7501267e bellard
    va_start(ap, fmt);
1885 493ae1f0 pbrook
    va_copy(ap2, ap);
1886 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1887 7501267e bellard
    vfprintf(stderr, fmt, ap);
1888 7501267e bellard
    fprintf(stderr, "\n");
1889 7501267e bellard
#ifdef TARGET_I386
1890 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1891 7fe48483 bellard
#else
1892 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1893 7501267e bellard
#endif
1894 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1895 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1896 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1897 93fcfe39 aliguori
        qemu_log("\n");
1898 f9373291 j_mayer
#ifdef TARGET_I386
1899 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1900 f9373291 j_mayer
#else
1901 93fcfe39 aliguori
        log_cpu_state(env, 0);
1902 f9373291 j_mayer
#endif
1903 31b1a7b4 aliguori
        qemu_log_flush();
1904 93fcfe39 aliguori
        qemu_log_close();
1905 924edcae balrog
    }
1906 493ae1f0 pbrook
    va_end(ap2);
1907 f9373291 j_mayer
    va_end(ap);
1908 fd052bf6 Riku Voipio
#if defined(CONFIG_USER_ONLY)
1909 fd052bf6 Riku Voipio
    {
1910 fd052bf6 Riku Voipio
        struct sigaction act;
1911 fd052bf6 Riku Voipio
        sigfillset(&act.sa_mask);
1912 fd052bf6 Riku Voipio
        act.sa_handler = SIG_DFL;
1913 fd052bf6 Riku Voipio
        sigaction(SIGABRT, &act, NULL);
1914 fd052bf6 Riku Voipio
    }
1915 fd052bf6 Riku Voipio
#endif
1916 7501267e bellard
    abort();
1917 7501267e bellard
}
1918 7501267e bellard
1919 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1920 c5be9f08 ths
{
1921 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1922 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1923 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1924 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1925 5a38f081 aliguori
    CPUBreakpoint *bp;
1926 5a38f081 aliguori
    CPUWatchpoint *wp;
1927 5a38f081 aliguori
#endif
1928 5a38f081 aliguori
1929 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1930 5a38f081 aliguori
1931 5a38f081 aliguori
    /* Preserve chaining and index. */
1932 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1933 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1934 5a38f081 aliguori
1935 5a38f081 aliguori
    /* Clone all break/watchpoints.
1936 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1937 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1938 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->breakpoints);
1939 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->watchpoints);
1940 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1941 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1942 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1943 5a38f081 aliguori
    }
1944 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1945 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1946 5a38f081 aliguori
                              wp->flags, NULL);
1947 5a38f081 aliguori
    }
1948 5a38f081 aliguori
#endif
1949 5a38f081 aliguori
1950 c5be9f08 ths
    return new_env;
1951 c5be9f08 ths
}
1952 c5be9f08 ths
1953 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1954 0124311e bellard
1955 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1956 5c751e99 edgar_igl
{
1957 5c751e99 edgar_igl
    unsigned int i;
1958 5c751e99 edgar_igl
1959 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might
1960 5c751e99 edgar_igl
       overlap the flushed page.  */
1961 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1962 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1963 9742bf26 Yoshiaki Tamura
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1964 5c751e99 edgar_igl
1965 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1966 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1967 9742bf26 Yoshiaki Tamura
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1968 5c751e99 edgar_igl
}
1969 5c751e99 edgar_igl
1970 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1971 08738984 Igor Kovalenko
    .addr_read  = -1,
1972 08738984 Igor Kovalenko
    .addr_write = -1,
1973 08738984 Igor Kovalenko
    .addr_code  = -1,
1974 08738984 Igor Kovalenko
    .addend     = -1,
1975 08738984 Igor Kovalenko
};
1976 08738984 Igor Kovalenko
1977 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1978 ee8b7021 bellard
   implemented yet) */
1979 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1980 33417e70 bellard
{
1981 33417e70 bellard
    int i;
1982 0124311e bellard
1983 9fa3e853 bellard
#if defined(DEBUG_TLB)
1984 9fa3e853 bellard
    printf("tlb_flush:\n");
1985 9fa3e853 bellard
#endif
1986 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1987 0124311e bellard
       links while we are modifying them */
1988 0124311e bellard
    env->current_tb = NULL;
1989 0124311e bellard
1990 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1991 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1992 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1993 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1994 cfde4bd9 Isaku Yamahata
        }
1995 33417e70 bellard
    }
1996 9fa3e853 bellard
1997 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1998 9fa3e853 bellard
1999 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
2000 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
2001 e3db7226 bellard
    tlb_flush_count++;
2002 33417e70 bellard
}
2003 33417e70 bellard
2004 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
2005 61382a50 bellard
{
2006 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
2007 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2008 5fafdf24 ths
        addr == (tlb_entry->addr_write &
2009 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2010 5fafdf24 ths
        addr == (tlb_entry->addr_code &
2011 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2012 08738984 Igor Kovalenko
        *tlb_entry = s_cputlb_empty_entry;
2013 84b7b8e7 bellard
    }
2014 61382a50 bellard
}
2015 61382a50 bellard
2016 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2017 33417e70 bellard
{
2018 8a40a180 bellard
    int i;
2019 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2020 0124311e bellard
2021 9fa3e853 bellard
#if defined(DEBUG_TLB)
2022 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2023 9fa3e853 bellard
#endif
2024 d4c430a8 Paul Brook
    /* Check if we need to flush due to large pages.  */
2025 d4c430a8 Paul Brook
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2026 d4c430a8 Paul Brook
#if defined(DEBUG_TLB)
2027 d4c430a8 Paul Brook
        printf("tlb_flush_page: forced full flush ("
2028 d4c430a8 Paul Brook
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2029 d4c430a8 Paul Brook
               env->tlb_flush_addr, env->tlb_flush_mask);
2030 d4c430a8 Paul Brook
#endif
2031 d4c430a8 Paul Brook
        tlb_flush(env, 1);
2032 d4c430a8 Paul Brook
        return;
2033 d4c430a8 Paul Brook
    }
2034 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
2035 0124311e bellard
       links while we are modifying them */
2036 0124311e bellard
    env->current_tb = NULL;
2037 61382a50 bellard
2038 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
2039 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2040 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2041 cfde4bd9 Isaku Yamahata
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2042 0124311e bellard
2043 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
2044 9fa3e853 bellard
}
2045 9fa3e853 bellard
2046 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
2047 9fa3e853 bellard
   can be detected */
2048 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
2049 9fa3e853 bellard
{
2050 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
2051 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
2052 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
2053 9fa3e853 bellard
}
2054 9fa3e853 bellard
2055 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
2056 3a7d929e bellard
   tested for self-modifying code */
2057 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2058 3a7d929e bellard
                                    target_ulong vaddr)
2059 9fa3e853 bellard
{
2060 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2061 1ccde1cb bellard
}
2062 1ccde1cb bellard
2063 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2064 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
2065 1ccde1cb bellard
{
2066 1ccde1cb bellard
    unsigned long addr;
2067 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2068 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2069 1ccde1cb bellard
        if ((addr - start) < length) {
2070 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2071 1ccde1cb bellard
        }
2072 1ccde1cb bellard
    }
2073 1ccde1cb bellard
}
2074 1ccde1cb bellard
2075 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
2076 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2077 0a962c02 bellard
                                     int dirty_flags)
2078 1ccde1cb bellard
{
2079 1ccde1cb bellard
    CPUState *env;
2080 4f2ac237 bellard
    unsigned long length, start1;
2081 f7c11b53 Yoshiaki Tamura
    int i;
2082 1ccde1cb bellard
2083 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
2084 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
2085 1ccde1cb bellard
2086 1ccde1cb bellard
    length = end - start;
2087 1ccde1cb bellard
    if (length == 0)
2088 1ccde1cb bellard
        return;
2089 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2090 f23db169 bellard
2091 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
2092 1ccde1cb bellard
       when accessing the range */
2093 b2e0a138 Michael S. Tsirkin
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
2094 a57d23e4 Stefan Weil
    /* Check that we don't span multiple blocks - spanning would break the
2095 5579c7f3 pbrook
       address comparisons below.  */
2096 b2e0a138 Michael S. Tsirkin
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2097 5579c7f3 pbrook
            != (end - 1) - start) {
2098 5579c7f3 pbrook
        abort();
2099 5579c7f3 pbrook
    }
2100 5579c7f3 pbrook
2101 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2102 cfde4bd9 Isaku Yamahata
        int mmu_idx;
2103 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2104 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
2105 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2106 cfde4bd9 Isaku Yamahata
                                      start1, length);
2107 cfde4bd9 Isaku Yamahata
        }
2108 6a00d601 bellard
    }
2109 1ccde1cb bellard
}
2110 1ccde1cb bellard
2111 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2112 74576198 aliguori
{
2113 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2114 74576198 aliguori
    in_migration = enable;
2115 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2116 f6f3fbca Michael S. Tsirkin
    return ret;
2117 74576198 aliguori
}
2118 74576198 aliguori
2119 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
2120 74576198 aliguori
{
2121 74576198 aliguori
    return in_migration;
2122 74576198 aliguori
}
2123 74576198 aliguori
2124 c227f099 Anthony Liguori
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2125 c227f099 Anthony Liguori
                                   target_phys_addr_t end_addr)
2126 2bec46dc aliguori
{
2127 7b8f3b78 Michael S. Tsirkin
    int ret;
2128 151f7749 Jan Kiszka
2129 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2130 151f7749 Jan Kiszka
    return ret;
2131 2bec46dc aliguori
}
2132 2bec46dc aliguori
2133 e5896b12 Anthony PERARD
int cpu_physical_log_start(target_phys_addr_t start_addr,
2134 e5896b12 Anthony PERARD
                           ram_addr_t size)
2135 e5896b12 Anthony PERARD
{
2136 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2137 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2138 e5896b12 Anthony PERARD
        if (client->log_start) {
2139 e5896b12 Anthony PERARD
            int r = client->log_start(client, start_addr, size);
2140 e5896b12 Anthony PERARD
            if (r < 0) {
2141 e5896b12 Anthony PERARD
                return r;
2142 e5896b12 Anthony PERARD
            }
2143 e5896b12 Anthony PERARD
        }
2144 e5896b12 Anthony PERARD
    }
2145 e5896b12 Anthony PERARD
    return 0;
2146 e5896b12 Anthony PERARD
}
2147 e5896b12 Anthony PERARD
2148 e5896b12 Anthony PERARD
int cpu_physical_log_stop(target_phys_addr_t start_addr,
2149 e5896b12 Anthony PERARD
                          ram_addr_t size)
2150 e5896b12 Anthony PERARD
{
2151 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2152 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2153 e5896b12 Anthony PERARD
        if (client->log_stop) {
2154 e5896b12 Anthony PERARD
            int r = client->log_stop(client, start_addr, size);
2155 e5896b12 Anthony PERARD
            if (r < 0) {
2156 e5896b12 Anthony PERARD
                return r;
2157 e5896b12 Anthony PERARD
            }
2158 e5896b12 Anthony PERARD
        }
2159 e5896b12 Anthony PERARD
    }
2160 e5896b12 Anthony PERARD
    return 0;
2161 e5896b12 Anthony PERARD
}
2162 e5896b12 Anthony PERARD
2163 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
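
/* A worked example of the mask-widening loop above (illustrative numbers,
   not from the original source): suppose a 2 MB page at 0x00200000 is
   already tracked, i.e. tlb_flush_addr = 0x00200000 and tlb_flush_mask =
   0xffe00000, and a second 2 MB page is added at 0x00800000.  The two
   addresses differ in bits 21 and 23, so the loop shifts the mask left
   until both differing bits fall outside it, giving mask 0xff000000 and
   tlb_flush_addr 0x00000000.  Any later tlb_flush_page() whose address
   matches that region then falls back to a full tlb_flush(). */
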
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
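
#if 0
/* A hedged sketch of a typical caller: a target's tlb_fill() handler
   resolves a guest virtual address through its page tables and then
   installs the result here.  The flat identity mapping and the
   example_* name are illustrative assumptions, not code from this
   file. */
static void example_tlb_fill(CPUState *env, target_ulong vaddr, int mmu_idx)
{
    /* assume a flat guest: physical address == virtual address */
    target_phys_addr_t paddr = vaddr & TARGET_PAGE_MASK;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* install one TARGET_PAGE_SIZE entry; passing a larger size would
       additionally record the page via tlb_add_large_page() */
    tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);
}
#endif
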
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
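
#if 0
/* A hedged usage sketch: this is roughly what the linux-user
   target_mprotect() path does after changing host-side protection.
   The example_* name is illustrative; the flag handling follows the
   function above. */
static void example_mprotect_done(target_ulong start, target_ulong end,
                                  int target_prot)
{
    /* PAGE_VALID marks the range as mapped; if PAGE_WRITE is granted,
       page_set_flags() also records PAGE_WRITE_ORG, and any page that
       still holds translated code is invalidated on the transition to
       writable. */
    page_set_flags(start, end, target_prot | PAGE_VALID);

    assert(page_get_flags(start) & PAGE_VALID);
}
#endif
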
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
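
#if 0
/* A hedged sketch of the caller side: in user-mode emulation the host
   SIGSEGV handler first offers a write fault to page_unprotect(), since
   the fault may simply be our own write-protection of pages that hold
   translated code.  The example_* name is illustrative; the real entry
   point is the host signal handler path (handle_cpu_signal()). */
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long pc, void *puc)
{
    /* map the faulting host address back to a guest address; a non-zero
       return means the fault was handled and the guest instruction can
       simply be restarted */
    return page_unprotect(h2g(host_addr), pc, puc);
}
#endif
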
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
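
/* A worked example of CHECK_SUBPAGE (illustrative numbers, assuming 4 KB
   target pages): registering the range [0x10000800, 0x10000c00) visits the
   page at 0x10000000 with addr == start_addr == 0x10000800 and orig_size ==
   0x400.  Then start_addr2 = 0x800 and end_addr2 = 0xbff, both strictly
   inside the page, so need_subpage is set and only the byte range
   [0x800, 0xbff] of that page is routed through a subpage_t. */
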
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset,
                                         bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
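
#if 0
/* A hedged usage sketch: a board model typically allocates a RAM block
   and then maps it into guest physical address space.  In this era the
   cpu_register_physical_memory()/..._offset() wrappers in cpu-common.h
   forward here; the addresses, size and name below are illustrative
   assumptions. */
static void example_map_ram(void)
{
    ram_addr_t ram_offset = qemu_ram_alloc(NULL, "example.ram", 0x800000);

    /* region_offset 0 and log_dirty false are the common defaults */
    cpu_register_physical_memory_log(0x00000000, 0x800000,
                                     ram_offset | IO_MEM_RAM,
                                     0, false);
}
#endif
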
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}
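
/* A worked example of the best-fit search above (illustrative numbers):
   with existing blocks at [0x00000000, 0x08000000) and
   [0x10000000, 0x18000000), a request for size 0x4000000 sees two gaps,
   [0x08000000, 0x10000000) of 128 MB and the open-ended one after
   0x18000000.  Both fit, but the 128 MB gap is the smaller, so the new
   block is placed at offset 0x08000000. */
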
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of the data
               segment (system break) and this value.  We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_mapcache_enabled()) {
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
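
#if 0
/* A hedged sketch of the host-pointer variant: a device such as ivshmem
   can hand an existing mapping to qemu_ram_alloc_from_ptr(), which then
   tracks it as a preallocated RAMBlock (RAM_PREALLOC_MASK) instead of
   allocating memory itself.  The size, name and example_* identifier
   are illustrative assumptions. */
static ram_addr_t example_register_shared_ram(DeviceState *dev, int fd)
{
    size_t size = 0x100000;
    void *host = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

    if (host == MAP_FAILED) {
        abort();
    }
    return qemu_ram_alloc_from_ptr(dev, "example.shmem", size, host);
}
#endif
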
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            qemu_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_mapcache_enabled()) {
                    qemu_invalidate_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            qemu_free(block);
            return;
        }
    }

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_mapcache_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return qemu_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host = qemu_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
3137 b2e0a138 Michael S. Tsirkin
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3138 b2e0a138 Michael S. Tsirkin
 */
3139 b2e0a138 Michael S. Tsirkin
void *qemu_safe_ram_ptr(ram_addr_t addr)
3140 b2e0a138 Michael S. Tsirkin
{
3141 b2e0a138 Michael S. Tsirkin
    RAMBlock *block;
3142 b2e0a138 Michael S. Tsirkin
3143 b2e0a138 Michael S. Tsirkin
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3144 b2e0a138 Michael S. Tsirkin
        if (addr - block->offset < block->length) {
3145 432d268c Jun Nakajima
            if (xen_mapcache_enabled()) {
3146 432d268c Jun Nakajima
                /* We need to check if the requested address is in the RAM
3147 432d268c Jun Nakajima
                 * because we don't want to map the entire memory in QEMU.
3148 712c2b41 Stefano Stabellini
                 * In that case, just map until the end of the page.
3149 432d268c Jun Nakajima
                 */
3150 432d268c Jun Nakajima
                if (block->offset == 0) {
3151 712c2b41 Stefano Stabellini
                    return qemu_map_cache(addr, 0, 0);
3152 432d268c Jun Nakajima
                } else if (block->host == NULL) {
3153 6506e4f9 Stefano Stabellini
                    block->host = qemu_map_cache(block->offset, block->length, 1);
3154 432d268c Jun Nakajima
                }
3155 432d268c Jun Nakajima
            }
3156 b2e0a138 Michael S. Tsirkin
            return block->host + (addr - block->offset);
3157 b2e0a138 Michael S. Tsirkin
        }
3158 b2e0a138 Michael S. Tsirkin
    }
3159 b2e0a138 Michael S. Tsirkin
3160 b2e0a138 Michael S. Tsirkin
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3161 b2e0a138 Michael S. Tsirkin
    abort();
3162 b2e0a138 Michael S. Tsirkin
3163 b2e0a138 Michael S. Tsirkin
    return NULL;
3164 b2e0a138 Michael S. Tsirkin
}
3165 b2e0a138 Michael S. Tsirkin
3166 38bee5dc Stefano Stabellini
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3167 38bee5dc Stefano Stabellini
 * but takes a size argument */
3168 38bee5dc Stefano Stabellini
void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size)
3169 38bee5dc Stefano Stabellini
{
3170 38bee5dc Stefano Stabellini
    if (xen_mapcache_enabled())
3171 38bee5dc Stefano Stabellini
        return qemu_map_cache(addr, *size, 1);
3172 38bee5dc Stefano Stabellini
    else {
3173 38bee5dc Stefano Stabellini
        RAMBlock *block;
3174 38bee5dc Stefano Stabellini
3175 38bee5dc Stefano Stabellini
        QLIST_FOREACH(block, &ram_list.blocks, next) {
3176 38bee5dc Stefano Stabellini
            if (addr - block->offset < block->length) {
3177 38bee5dc Stefano Stabellini
                if (addr - block->offset + *size > block->length)
3178 38bee5dc Stefano Stabellini
                    *size = block->length - addr + block->offset;
3179 38bee5dc Stefano Stabellini
                return block->host + (addr - block->offset);
3180 38bee5dc Stefano Stabellini
            }
3181 38bee5dc Stefano Stabellini
        }
3182 38bee5dc Stefano Stabellini
3183 38bee5dc Stefano Stabellini
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3184 38bee5dc Stefano Stabellini
        abort();
3185 38bee5dc Stefano Stabellini
3186 38bee5dc Stefano Stabellini
        *size = 0;
3187 38bee5dc Stefano Stabellini
        return NULL;
3188 38bee5dc Stefano Stabellini
    }
3189 38bee5dc Stefano Stabellini
}
3190 38bee5dc Stefano Stabellini
3191 050a0ddf Anthony PERARD
void qemu_put_ram_ptr(void *addr)
3192 050a0ddf Anthony PERARD
{
3193 050a0ddf Anthony PERARD
    trace_qemu_put_ram_ptr(addr);
3194 050a0ddf Anthony PERARD
}
3195 050a0ddf Anthony PERARD
3196 e890261f Marcelo Tosatti
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3197 5579c7f3 pbrook
{
3198 94a6b54f pbrook
    RAMBlock *block;
3199 94a6b54f pbrook
    uint8_t *host = ptr;
3200 94a6b54f pbrook
3201 712c2b41 Stefano Stabellini
    if (xen_mapcache_enabled()) {
3202 712c2b41 Stefano Stabellini
        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
3203 712c2b41 Stefano Stabellini
        return 0;
3204 712c2b41 Stefano Stabellini
    }
3205 712c2b41 Stefano Stabellini
3206 f471a17e Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3207 432d268c Jun Nakajima
        /* This case happens when the block is not mapped. */
3208 432d268c Jun Nakajima
        if (block->host == NULL) {
3209 432d268c Jun Nakajima
            continue;
3210 432d268c Jun Nakajima
        }
3211 f471a17e Alex Williamson
        if (host - block->host < block->length) {
3212 e890261f Marcelo Tosatti
            *ram_addr = block->offset + (host - block->host);
3213 e890261f Marcelo Tosatti
            return 0;
3214 f471a17e Alex Williamson
        }
3215 94a6b54f pbrook
    }
3216 432d268c Jun Nakajima
3217 e890261f Marcelo Tosatti
    return -1;
3218 e890261f Marcelo Tosatti
}
3219 f471a17e Alex Williamson
3220 e890261f Marcelo Tosatti
/* Some of the softmmu routines need to translate from a host pointer
3221 e890261f Marcelo Tosatti
   (typically a TLB entry) back to a ram offset.  */
3222 e890261f Marcelo Tosatti
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3223 e890261f Marcelo Tosatti
{
3224 e890261f Marcelo Tosatti
    ram_addr_t ram_addr;
3225 f471a17e Alex Williamson
3226 e890261f Marcelo Tosatti
    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3227 e890261f Marcelo Tosatti
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
3228 e890261f Marcelo Tosatti
        abort();
3229 e890261f Marcelo Tosatti
    }
3230 e890261f Marcelo Tosatti
    return ram_addr;
3231 5579c7f3 pbrook
}
3232 5579c7f3 pbrook
3233 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
3234 33417e70 bellard
{
3235 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
3236 ab3d1727 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3237 67d3b957 pbrook
#endif
3238 5b450407 Richard Henderson
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3239 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 1);
3240 e18231a3 blueswir1
#endif
3241 e18231a3 blueswir1
    return 0;
3242 e18231a3 blueswir1
}
3243 e18231a3 blueswir1
3244 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3245 e18231a3 blueswir1
{
3246 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
3247 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3248 e18231a3 blueswir1
#endif
3249 5b450407 Richard Henderson
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3250 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 2);
3251 e18231a3 blueswir1
#endif
3252 e18231a3 blueswir1
    return 0;
3253 e18231a3 blueswir1
}
3254 e18231a3 blueswir1
3255 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3256 e18231a3 blueswir1
{
3257 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
3258 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3259 e18231a3 blueswir1
#endif
3260 5b450407 Richard Henderson
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3261 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 4);
3262 b4f0a316 blueswir1
#endif
3263 33417e70 bellard
    return 0;
3264 33417e70 bellard
}
3265 33417e70 bellard
3266 c227f099 Anthony Liguori
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
3267 33417e70 bellard
{
3268 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
3269 ab3d1727 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3270 67d3b957 pbrook
#endif
3271 5b450407 Richard Henderson
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3272 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 1);
3273 e18231a3 blueswir1
#endif
3274 e18231a3 blueswir1
}
3275 e18231a3 blueswir1
3276 c227f099 Anthony Liguori
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3277 e18231a3 blueswir1
{
3278 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
3279 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3280 e18231a3 blueswir1
#endif
3281 5b450407 Richard Henderson
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3282 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 2);
3283 e18231a3 blueswir1
#endif
3284 e18231a3 blueswir1
}
3285 e18231a3 blueswir1
3286 c227f099 Anthony Liguori
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3287 e18231a3 blueswir1
{
3288 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
3289 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3290 e18231a3 blueswir1
#endif
3291 5b450407 Richard Henderson
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3292 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 4);
3293 b4f0a316 blueswir1
#endif
3294 33417e70 bellard
}
3295 33417e70 bellard
3296 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3297 33417e70 bellard
    unassigned_mem_readb,
3298 e18231a3 blueswir1
    unassigned_mem_readw,
3299 e18231a3 blueswir1
    unassigned_mem_readl,
3300 33417e70 bellard
};
3301 33417e70 bellard
3302 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3303 33417e70 bellard
    unassigned_mem_writeb,
3304 e18231a3 blueswir1
    unassigned_mem_writew,
3305 e18231a3 blueswir1
    unassigned_mem_writel,
3306 33417e70 bellard
};
3307 33417e70 bellard
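
/* Sketch (illustrative, not QEMU code): the two arrays above follow the
 * pattern used for every io-memory slot in this file: one handler per
 * access size, indexed 0/1/2 for byte/word/dword, so callers dispatch
 * with table[size_log2](opaque, addr). Standalone: */

#include <stdint.h>
#include <stdio.h>

typedef uint32_t (*ReadFn)(void *opaque, uint64_t addr);

static uint32_t my_readb(void *o, uint64_t a) { (void)o; (void)a; return 0xab; }
static uint32_t my_readw(void *o, uint64_t a) { (void)o; (void)a; return 0xabcd; }
static uint32_t my_readl(void *o, uint64_t a) { (void)o; (void)a; return 0xabcdef01; }

/* index 0 = byte, 1 = word, 2 = dword, mirroring the arrays above */
static ReadFn const my_read[3] = { my_readb, my_readw, my_readl };

static uint32_t dispatch_read(unsigned size_log2, void *opaque, uint64_t addr)
{
    return my_read[size_log2](opaque, addr);
}

int main(void)
{
    printf("%#x\n", dispatch_read(2, NULL, 0x1000)); /* 32-bit read: 0xabcdef01 */
    return 0;
}
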
3308 c227f099 Anthony Liguori
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3309 0f459d16 pbrook
                                uint32_t val)
3310 9fa3e853 bellard
{
3311 3a7d929e bellard
    int dirty_flags;
3312 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3313 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3314 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3315 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 1);
3316 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3317 9fa3e853 bellard
#endif
3318 3a7d929e bellard
    }
3319 5579c7f3 pbrook
    stb_p(qemu_get_ram_ptr(ram_addr), val);
3320 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3321 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3322 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3323 f23db169 bellard
       flushed */
3324 f23db169 bellard
    if (dirty_flags == 0xff)
3325 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3326 9fa3e853 bellard
}
3327 9fa3e853 bellard
3328 c227f099 Anthony Liguori
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3329 0f459d16 pbrook
                                uint32_t val)
3330 9fa3e853 bellard
{
3331 3a7d929e bellard
    int dirty_flags;
3332 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3333 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3334 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3335 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 2);
3336 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3337 9fa3e853 bellard
#endif
3338 3a7d929e bellard
    }
3339 5579c7f3 pbrook
    stw_p(qemu_get_ram_ptr(ram_addr), val);
3340 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3341 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3342 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3343 f23db169 bellard
       flushed */
3344 f23db169 bellard
    if (dirty_flags == 0xff)
3345 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3346 9fa3e853 bellard
}
3347 9fa3e853 bellard
3348 c227f099 Anthony Liguori
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3349 0f459d16 pbrook
                                uint32_t val)
3350 9fa3e853 bellard
{
3351 3a7d929e bellard
    int dirty_flags;
3352 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3353 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3354 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3355 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
3356 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3357 9fa3e853 bellard
#endif
3358 3a7d929e bellard
    }
3359 5579c7f3 pbrook
    stl_p(qemu_get_ram_ptr(ram_addr), val);
3360 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3361 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3362 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3363 f23db169 bellard
       flushed */
3364 f23db169 bellard
    if (dirty_flags == 0xff)
3365 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3366 9fa3e853 bellard
}
3367 9fa3e853 bellard
3368 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const error_mem_read[3] = {
3369 9fa3e853 bellard
    NULL, /* never used */
3370 9fa3e853 bellard
    NULL, /* never used */
3371 9fa3e853 bellard
    NULL, /* never used */
3372 9fa3e853 bellard
};
3373 9fa3e853 bellard
3374 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3375 1ccde1cb bellard
    notdirty_mem_writeb,
3376 1ccde1cb bellard
    notdirty_mem_writew,
3377 1ccde1cb bellard
    notdirty_mem_writel,
3378 1ccde1cb bellard
};
3379 1ccde1cb bellard
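
/* Sketch (illustrative): the three notdirty handlers above share one
 * protocol: if CODE_DIRTY_FLAG is clear, translated code may live in the
 * page, so TBs are invalidated first; after the store, every dirty bit
 * except CODE_DIRTY_FLAG is set, and only when the flags read back as
 * 0xff can the slow write path be dropped. The flag arithmetic in
 * isolation, with an assumed flag value (not taken from QEMU's headers): */

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MY_CODE_DIRTY_FLAG 0x02   /* assumed value, for illustration only */

static uint8_t dirty_flags = MY_CODE_DIRTY_FLAG;

/* Returns true when the notdirty callback can be removed. */
static bool notdirty_write(void)
{
    if (!(dirty_flags & MY_CODE_DIRTY_FLAG)) {
        /* translated code may exist in this page: the real handlers
         * call tb_invalidate_phys_page_fast() here before storing */
    }
    /* ... the store itself would happen here ... */
    dirty_flags |= (0xff & ~MY_CODE_DIRTY_FLAG);  /* dirty all but the code bit */
    return dirty_flags == 0xff;
}

int main(void)
{
    printf("drop slow path: %d\n", notdirty_write()); /* 1: code bit already set */
    return 0;
}
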
3380 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
3381 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
3382 0f459d16 pbrook
{
3383 0f459d16 pbrook
    CPUState *env = cpu_single_env;
3384 06d55cc1 aliguori
    target_ulong pc, cs_base;
3385 06d55cc1 aliguori
    TranslationBlock *tb;
3386 0f459d16 pbrook
    target_ulong vaddr;
3387 a1d1bb31 aliguori
    CPUWatchpoint *wp;
3388 06d55cc1 aliguori
    int cpu_flags;
3389 0f459d16 pbrook
3390 06d55cc1 aliguori
    if (env->watchpoint_hit) {
3391 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
3392 06d55cc1 aliguori
         * the debug interrupt so that it will trigger after the
3393 06d55cc1 aliguori
         * current instruction. */
3394 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3395 06d55cc1 aliguori
        return;
3396 06d55cc1 aliguori
    }
3397 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3398 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3399 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
3400 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3401 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
3402 6e140f28 aliguori
            if (!env->watchpoint_hit) {
3403 6e140f28 aliguori
                env->watchpoint_hit = wp;
3404 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
3405 6e140f28 aliguori
                if (!tb) {
3406 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
3407 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
3408 6e140f28 aliguori
                }
3409 618ba8e6 Stefan Weil
                cpu_restore_state(tb, env, env->mem_io_pc);
3410 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
3411 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3412 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
3413 6e140f28 aliguori
                } else {
3414 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3415 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3416 6e140f28 aliguori
                }
3417 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
3418 06d55cc1 aliguori
            }
3419 6e140f28 aliguori
        } else {
3420 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
3421 0f459d16 pbrook
        }
3422 0f459d16 pbrook
    }
3423 0f459d16 pbrook
}
3424 0f459d16 pbrook
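
/* Sketch (illustrative): the hit test above encodes access and watchpoint
 * lengths as masks (~0x0, ~0x1, ~0x3 for 1/2/4 bytes). An access hits when
 * it covers the watchpoint base, or the watchpoint's range covers the
 * access address. Standalone: */

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* len_mask is ~(len - 1) for a power-of-two length */
static bool wp_hit(uint64_t vaddr, uint64_t access_len_mask,
                   uint64_t wp_vaddr, uint64_t wp_len_mask)
{
    return vaddr == (wp_vaddr & access_len_mask) ||
           (vaddr & wp_len_mask) == wp_vaddr;
}

int main(void)
{
    /* 4-byte watchpoint at 0x1000; a 2-byte access at 0x1002 overlaps it */
    printf("%d\n", wp_hit(0x1002, ~(uint64_t)0x1, 0x1000, ~(uint64_t)0x3));
    return 0;
}
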
3425 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
3426 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
3427 6658ffb8 pbrook
   phys routines.  */
3428 c227f099 Anthony Liguori
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3429 6658ffb8 pbrook
{
3430 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3431 6658ffb8 pbrook
    return ldub_phys(addr);
3432 6658ffb8 pbrook
}
3433 6658ffb8 pbrook
3434 c227f099 Anthony Liguori
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3435 6658ffb8 pbrook
{
3436 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3437 6658ffb8 pbrook
    return lduw_phys(addr);
3438 6658ffb8 pbrook
}
3439 6658ffb8 pbrook
3440 c227f099 Anthony Liguori
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3441 6658ffb8 pbrook
{
3442 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3443 6658ffb8 pbrook
    return ldl_phys(addr);
3444 6658ffb8 pbrook
}
3445 6658ffb8 pbrook
3446 c227f099 Anthony Liguori
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3447 6658ffb8 pbrook
                             uint32_t val)
3448 6658ffb8 pbrook
{
3449 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3450 6658ffb8 pbrook
    stb_phys(addr, val);
3451 6658ffb8 pbrook
}
3452 6658ffb8 pbrook
3453 c227f099 Anthony Liguori
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3454 6658ffb8 pbrook
                             uint32_t val)
3455 6658ffb8 pbrook
{
3456 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3457 6658ffb8 pbrook
    stw_phys(addr, val);
3458 6658ffb8 pbrook
}
3459 6658ffb8 pbrook
3460 c227f099 Anthony Liguori
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3461 6658ffb8 pbrook
                             uint32_t val)
3462 6658ffb8 pbrook
{
3463 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3464 6658ffb8 pbrook
    stl_phys(addr, val);
3465 6658ffb8 pbrook
}
3466 6658ffb8 pbrook
3467 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const watch_mem_read[3] = {
3468 6658ffb8 pbrook
    watch_mem_readb,
3469 6658ffb8 pbrook
    watch_mem_readw,
3470 6658ffb8 pbrook
    watch_mem_readl,
3471 6658ffb8 pbrook
};
3472 6658ffb8 pbrook
3473 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3474 6658ffb8 pbrook
    watch_mem_writeb,
3475 6658ffb8 pbrook
    watch_mem_writew,
3476 6658ffb8 pbrook
    watch_mem_writel,
3477 6658ffb8 pbrook
};
3478 6658ffb8 pbrook
3479 f6405247 Richard Henderson
static inline uint32_t subpage_readlen (subpage_t *mmio,
3480 f6405247 Richard Henderson
                                        target_phys_addr_t addr,
3481 f6405247 Richard Henderson
                                        unsigned int len)
3482 db7b5426 blueswir1
{
3483 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3484 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3485 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3486 db7b5426 blueswir1
           mmio, len, addr, idx);
3487 db7b5426 blueswir1
#endif
3488 db7b5426 blueswir1
3489 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3490 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3491 f6405247 Richard Henderson
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3492 db7b5426 blueswir1
}
3493 db7b5426 blueswir1
3494 c227f099 Anthony Liguori
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3495 f6405247 Richard Henderson
                                     uint32_t value, unsigned int len)
3496 db7b5426 blueswir1
{
3497 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3498 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3499 f6405247 Richard Henderson
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3500 f6405247 Richard Henderson
           __func__, mmio, len, addr, idx, value);
3501 db7b5426 blueswir1
#endif
3502 f6405247 Richard Henderson
3503 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3504 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3505 f6405247 Richard Henderson
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3506 db7b5426 blueswir1
}
3507 db7b5426 blueswir1
3508 c227f099 Anthony Liguori
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3509 db7b5426 blueswir1
{
3510 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 0);
3511 db7b5426 blueswir1
}
3512 db7b5426 blueswir1
3513 c227f099 Anthony Liguori
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3514 db7b5426 blueswir1
                            uint32_t value)
3515 db7b5426 blueswir1
{
3516 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
3517 db7b5426 blueswir1
}
3518 db7b5426 blueswir1
3519 c227f099 Anthony Liguori
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3520 db7b5426 blueswir1
{
3521 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 1);
3522 db7b5426 blueswir1
}
3523 db7b5426 blueswir1
3524 c227f099 Anthony Liguori
static void subpage_writew (void *opaque, target_phys_addr_t addr,
3525 db7b5426 blueswir1
                            uint32_t value)
3526 db7b5426 blueswir1
{
3527 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
3528 db7b5426 blueswir1
}
3529 db7b5426 blueswir1
3530 c227f099 Anthony Liguori
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3531 db7b5426 blueswir1
{
3532 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 2);
3533 db7b5426 blueswir1
}
3534 db7b5426 blueswir1
3535 f6405247 Richard Henderson
static void subpage_writel (void *opaque, target_phys_addr_t addr,
3536 f6405247 Richard Henderson
                            uint32_t value)
3537 db7b5426 blueswir1
{
3538 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
3539 db7b5426 blueswir1
}
3540 db7b5426 blueswir1
3541 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const subpage_read[] = {
3542 db7b5426 blueswir1
    &subpage_readb,
3543 db7b5426 blueswir1
    &subpage_readw,
3544 db7b5426 blueswir1
    &subpage_readl,
3545 db7b5426 blueswir1
};
3546 db7b5426 blueswir1
3547 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const subpage_write[] = {
3548 db7b5426 blueswir1
    &subpage_writeb,
3549 db7b5426 blueswir1
    &subpage_writew,
3550 db7b5426 blueswir1
    &subpage_writel,
3551 db7b5426 blueswir1
};
3552 db7b5426 blueswir1
3553 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3554 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset)
3555 db7b5426 blueswir1
{
3556 db7b5426 blueswir1
    int idx, eidx;
3557 db7b5426 blueswir1
3558 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3559 db7b5426 blueswir1
        return -1;
3560 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
3561 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
3562 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3563 0bf9e31a Blue Swirl
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3564 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
3565 db7b5426 blueswir1
#endif
3566 95c318f5 Gleb Natapov
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3567 95c318f5 Gleb Natapov
        memory = IO_MEM_UNASSIGNED;
3568 f6405247 Richard Henderson
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3569 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
3570 f6405247 Richard Henderson
        mmio->sub_io_index[idx] = memory;
3571 f6405247 Richard Henderson
        mmio->region_offset[idx] = region_offset;
3572 db7b5426 blueswir1
    }
3573 db7b5426 blueswir1
3574 db7b5426 blueswir1
    return 0;
3575 db7b5426 blueswir1
}
3576 db7b5426 blueswir1
3577 f6405247 Richard Henderson
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3578 f6405247 Richard Henderson
                                ram_addr_t orig_memory,
3579 f6405247 Richard Henderson
                                ram_addr_t region_offset)
3580 db7b5426 blueswir1
{
3581 c227f099 Anthony Liguori
    subpage_t *mmio;
3582 db7b5426 blueswir1
    int subpage_memory;
3583 db7b5426 blueswir1
3584 c227f099 Anthony Liguori
    mmio = qemu_mallocz(sizeof(subpage_t));
3585 1eec614b aliguori
3586 1eec614b aliguori
    mmio->base = base;
3587 2507c12a Alexander Graf
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3588 2507c12a Alexander Graf
                                            DEVICE_NATIVE_ENDIAN);
3589 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3590 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3591 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3592 db7b5426 blueswir1
#endif
3593 1eec614b aliguori
    *phys = subpage_memory | IO_MEM_SUBPAGE;
3594 f6405247 Richard Henderson
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3595 db7b5426 blueswir1
3596 db7b5426 blueswir1
    return mmio;
3597 db7b5426 blueswir1
}
3598 db7b5426 blueswir1
3599 88715657 aliguori
static int get_free_io_mem_idx(void)
3600 88715657 aliguori
{
3601 88715657 aliguori
    int i;
3602 88715657 aliguori
3603 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3604 88715657 aliguori
        if (!io_mem_used[i]) {
3605 88715657 aliguori
            io_mem_used[i] = 1;
3606 88715657 aliguori
            return i;
3607 88715657 aliguori
        }
3608 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3609 88715657 aliguori
    return -1;
3610 88715657 aliguori
}
3611 88715657 aliguori
3612 dd310534 Alexander Graf
/*
3613 dd310534 Alexander Graf
 * Usually, devices operate in little endian mode. There are devices out
3614 dd310534 Alexander Graf
 * there that operate in big endian too. Each device gets byte swapped
3615 dd310534 Alexander Graf
 * mmio if plugged onto a CPU that does the other endianness.
3616 dd310534 Alexander Graf
 *
3617 dd310534 Alexander Graf
 * CPU          Device           swap?
3618 dd310534 Alexander Graf
 *
3619 dd310534 Alexander Graf
 * little       little           no
3620 dd310534 Alexander Graf
 * little       big              yes
3621 dd310534 Alexander Graf
 * big          little           yes
3622 dd310534 Alexander Graf
 * big          big              no
3623 dd310534 Alexander Graf
 */
3624 dd310534 Alexander Graf
3625 dd310534 Alexander Graf
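
/* Sketch (standalone): the table above reduces to a single inequality:
 * mmio needs byte swapping exactly when the CPU's and the device's
 * endianness differ. The decision plus the 16/32-bit swaps, written out
 * locally since QEMU's bswap helpers are not assumed here: */

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum endian { E_LITTLE, E_BIG };

static bool needs_swap(enum endian cpu, enum endian dev)
{
    return cpu != dev;  /* little/little and big/big pass through */
}

static uint16_t my_bswap16(uint16_t v) { return (uint16_t)((v >> 8) | (v << 8)); }
static uint32_t my_bswap32(uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0xff00) | ((v << 8) & 0xff0000) | (v << 24);
}

int main(void)
{
    printf("big cpu / little dev: swap=%d\n", needs_swap(E_BIG, E_LITTLE));
    printf("%#x -> %#x, %#x -> %#x\n",
           0x1234, my_bswap16(0x1234), 0x12345678u, my_bswap32(0x12345678u));
    return 0;
}
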
typedef struct SwapEndianContainer {
3626 dd310534 Alexander Graf
    CPUReadMemoryFunc *read[3];
3627 dd310534 Alexander Graf
    CPUWriteMemoryFunc *write[3];
3628 dd310534 Alexander Graf
    void *opaque;
3629 dd310534 Alexander Graf
} SwapEndianContainer;
3630 dd310534 Alexander Graf
3631 dd310534 Alexander Graf
static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3632 dd310534 Alexander Graf
{
3633 dd310534 Alexander Graf
    uint32_t val;
3634 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3635 dd310534 Alexander Graf
    val = c->read[0](c->opaque, addr);
3636 dd310534 Alexander Graf
    return val;
3637 dd310534 Alexander Graf
}
3638 dd310534 Alexander Graf
3639 dd310534 Alexander Graf
static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3640 dd310534 Alexander Graf
{
3641 dd310534 Alexander Graf
    uint32_t val;
3642 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3643 dd310534 Alexander Graf
    val = bswap16(c->read[1](c->opaque, addr));
3644 dd310534 Alexander Graf
    return val;
3645 dd310534 Alexander Graf
}
3646 dd310534 Alexander Graf
3647 dd310534 Alexander Graf
static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3648 dd310534 Alexander Graf
{
3649 dd310534 Alexander Graf
    uint32_t val;
3650 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3651 dd310534 Alexander Graf
    val = bswap32(c->read[2](c->opaque, addr));
3652 dd310534 Alexander Graf
    return val;
3653 dd310534 Alexander Graf
}
3654 dd310534 Alexander Graf
3655 dd310534 Alexander Graf
static CPUReadMemoryFunc * const swapendian_readfn[3]={
3656 dd310534 Alexander Graf
    swapendian_mem_readb,
3657 dd310534 Alexander Graf
    swapendian_mem_readw,
3658 dd310534 Alexander Graf
    swapendian_mem_readl
3659 dd310534 Alexander Graf
};
3660 dd310534 Alexander Graf
3661 dd310534 Alexander Graf
static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3662 dd310534 Alexander Graf
                                  uint32_t val)
3663 dd310534 Alexander Graf
{
3664 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3665 dd310534 Alexander Graf
    c->write[0](c->opaque, addr, val);
3666 dd310534 Alexander Graf
}
3667 dd310534 Alexander Graf
3668 dd310534 Alexander Graf
static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3669 dd310534 Alexander Graf
                                  uint32_t val)
3670 dd310534 Alexander Graf
{
3671 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3672 dd310534 Alexander Graf
    c->write[1](c->opaque, addr, bswap16(val));
3673 dd310534 Alexander Graf
}
3674 dd310534 Alexander Graf
3675 dd310534 Alexander Graf
static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3676 dd310534 Alexander Graf
                                  uint32_t val)
3677 dd310534 Alexander Graf
{
3678 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3679 dd310534 Alexander Graf
    c->write[2](c->opaque, addr, bswap32(val));
3680 dd310534 Alexander Graf
}
3681 dd310534 Alexander Graf
3682 dd310534 Alexander Graf
static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3683 dd310534 Alexander Graf
    swapendian_mem_writeb,
3684 dd310534 Alexander Graf
    swapendian_mem_writew,
3685 dd310534 Alexander Graf
    swapendian_mem_writel
3686 dd310534 Alexander Graf
};
3687 dd310534 Alexander Graf
3688 dd310534 Alexander Graf
static void swapendian_init(int io_index)
3689 dd310534 Alexander Graf
{
3690 dd310534 Alexander Graf
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3691 dd310534 Alexander Graf
    int i;
3692 dd310534 Alexander Graf
3693 dd310534 Alexander Graf
    /* Interpose byte-swapping mmio wrappers around the original handlers */
3694 dd310534 Alexander Graf
    c->opaque = io_mem_opaque[io_index];
3695 dd310534 Alexander Graf
    for (i = 0; i < 3; i++) {
3696 dd310534 Alexander Graf
        c->read[i] = io_mem_read[io_index][i];
3697 dd310534 Alexander Graf
        c->write[i] = io_mem_write[io_index][i];
3698 dd310534 Alexander Graf
3699 dd310534 Alexander Graf
        io_mem_read[io_index][i] = swapendian_readfn[i];
3700 dd310534 Alexander Graf
        io_mem_write[io_index][i] = swapendian_writefn[i];
3701 dd310534 Alexander Graf
    }
3702 dd310534 Alexander Graf
    io_mem_opaque[io_index] = c;
3703 dd310534 Alexander Graf
}
3704 dd310534 Alexander Graf
3705 dd310534 Alexander Graf
static void swapendian_del(int io_index)
3706 dd310534 Alexander Graf
{
3707 dd310534 Alexander Graf
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3708 dd310534 Alexander Graf
        qemu_free(io_mem_opaque[io_index]);
3709 dd310534 Alexander Graf
    }
3710 dd310534 Alexander Graf
}
3711 dd310534 Alexander Graf
3712 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3713 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3714 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3715 3ee89922 blueswir1
   If io_index is non-zero, the corresponding io zone is
3716 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3717 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(); -1 is
3718 4254fab8 blueswir1
   returned on error. */
3719 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
3720 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
3721 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
3722 dd310534 Alexander Graf
                                        void *opaque, enum device_endian endian)
3723 33417e70 bellard
{
3724 3cab721d Richard Henderson
    int i;
3725 3cab721d Richard Henderson
3726 33417e70 bellard
    if (io_index <= 0) {
3727 88715657 aliguori
        io_index = get_free_io_mem_idx();
3728 88715657 aliguori
        if (io_index == -1)
3729 88715657 aliguori
            return io_index;
3730 33417e70 bellard
    } else {
3731 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
3732 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3733 33417e70 bellard
            return -1;
3734 33417e70 bellard
    }
3735 b5ff1b31 bellard
3736 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3737 3cab721d Richard Henderson
        io_mem_read[io_index][i]
3738 3cab721d Richard Henderson
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3739 3cab721d Richard Henderson
    }
3740 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3741 3cab721d Richard Henderson
        io_mem_write[io_index][i]
3742 3cab721d Richard Henderson
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3743 3cab721d Richard Henderson
    }
3744 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3745 f6405247 Richard Henderson
3746 dd310534 Alexander Graf
    switch (endian) {
3747 dd310534 Alexander Graf
    case DEVICE_BIG_ENDIAN:
3748 dd310534 Alexander Graf
#ifndef TARGET_WORDS_BIGENDIAN
3749 dd310534 Alexander Graf
        swapendian_init(io_index);
3750 dd310534 Alexander Graf
#endif
3751 dd310534 Alexander Graf
        break;
3752 dd310534 Alexander Graf
    case DEVICE_LITTLE_ENDIAN:
3753 dd310534 Alexander Graf
#ifdef TARGET_WORDS_BIGENDIAN
3754 dd310534 Alexander Graf
        swapendian_init(io_index);
3755 dd310534 Alexander Graf
#endif
3756 dd310534 Alexander Graf
        break;
3757 dd310534 Alexander Graf
    case DEVICE_NATIVE_ENDIAN:
3758 dd310534 Alexander Graf
    default:
3759 dd310534 Alexander Graf
        break;
3760 dd310534 Alexander Graf
    }
3761 dd310534 Alexander Graf
3762 f6405247 Richard Henderson
    return (io_index << IO_MEM_SHIFT);
3763 33417e70 bellard
}
3764 61382a50 bellard
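
/* Sketch (hedged usage, compiles only inside this QEMU tree): a typical
 * device defines three read and three write handlers, registers them, and
 * maps the returned token with cpu_register_physical_memory(). All mydev_*
 * names and the 0x10000000 base are made up for illustration: */

static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t v) {}
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t v) {}
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t v) {}

static CPUReadMemoryFunc * const mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc * const mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(void *opaque)
{
    /* io_index 0 (implicit) allocates a fresh io zone */
    int io = cpu_register_io_memory(mydev_read, mydev_write, opaque,
                                    DEVICE_NATIVE_ENDIAN);
    /* one page of MMIO at a made-up guest-physical address */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io);
}
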
3765 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3766 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3767 dd310534 Alexander Graf
                           void *opaque, enum device_endian endian)
3768 1eed09cb Avi Kivity
{
3769 2507c12a Alexander Graf
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
3770 1eed09cb Avi Kivity
}
3771 1eed09cb Avi Kivity
3772 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3773 88715657 aliguori
{
3774 88715657 aliguori
    int i;
3775 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3776 88715657 aliguori
3777 dd310534 Alexander Graf
    swapendian_del(io_index);
3778 dd310534 Alexander Graf
3779 88715657 aliguori
    for (i=0;i < 3; i++) {
3780 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3781 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3782 88715657 aliguori
    }
3783 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3784 88715657 aliguori
    io_mem_used[io_index] = 0;
3785 88715657 aliguori
}
3786 88715657 aliguori
3787 e9179ce1 Avi Kivity
static void io_mem_init(void)
3788 e9179ce1 Avi Kivity
{
3789 e9179ce1 Avi Kivity
    int i;
3790 e9179ce1 Avi Kivity
3791 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3792 2507c12a Alexander Graf
                                 unassigned_mem_write, NULL,
3793 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3794 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3795 2507c12a Alexander Graf
                                 unassigned_mem_write, NULL,
3796 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3797 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3798 2507c12a Alexander Graf
                                 notdirty_mem_write, NULL,
3799 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3800 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3801 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3802 e9179ce1 Avi Kivity
3803 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3804 2507c12a Alexander Graf
                                          watch_mem_write, NULL,
3805 2507c12a Alexander Graf
                                          DEVICE_NATIVE_ENDIAN);
3806 e9179ce1 Avi Kivity
}
3807 e9179ce1 Avi Kivity
3808 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3809 e2eef170 pbrook
3810 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3811 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3812 a68fe89c Paul Brook
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3813 a68fe89c Paul Brook
                        uint8_t *buf, int len, int is_write)
3814 13eb76e0 bellard
{
3815 13eb76e0 bellard
    int l, flags;
3816 13eb76e0 bellard
    target_ulong page;
3817 53a5960a pbrook
    void * p;
3818 13eb76e0 bellard
3819 13eb76e0 bellard
    while (len > 0) {
3820 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3821 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3822 13eb76e0 bellard
        if (l > len)
3823 13eb76e0 bellard
            l = len;
3824 13eb76e0 bellard
        flags = page_get_flags(page);
3825 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
3826 a68fe89c Paul Brook
            return -1;
3827 13eb76e0 bellard
        if (is_write) {
3828 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3829 a68fe89c Paul Brook
                return -1;
3830 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3831 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3832 a68fe89c Paul Brook
                return -1;
3833 72fb7daa aurel32
            memcpy(p, buf, l);
3834 72fb7daa aurel32
            unlock_user(p, addr, l);
3835 13eb76e0 bellard
        } else {
3836 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3837 a68fe89c Paul Brook
                return -1;
3838 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3839 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3840 a68fe89c Paul Brook
                return -1;
3841 72fb7daa aurel32
            memcpy(buf, p, l);
3842 5b257578 aurel32
            unlock_user(p, addr, 0);
3843 13eb76e0 bellard
        }
3844 13eb76e0 bellard
        len -= l;
3845 13eb76e0 bellard
        buf += l;
3846 13eb76e0 bellard
        addr += l;
3847 13eb76e0 bellard
    }
3848 a68fe89c Paul Brook
    return 0;
3849 13eb76e0 bellard
}
3850 8df1cd07 bellard
3851 13eb76e0 bellard
#else
3852 c227f099 Anthony Liguori
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3853 13eb76e0 bellard
                            int len, int is_write)
3854 13eb76e0 bellard
{
3855 13eb76e0 bellard
    int l, io_index;
3856 13eb76e0 bellard
    uint8_t *ptr;
3857 13eb76e0 bellard
    uint32_t val;
3858 c227f099 Anthony Liguori
    target_phys_addr_t page;
3859 2e12669a bellard
    unsigned long pd;
3860 92e873b9 bellard
    PhysPageDesc *p;
3861 3b46e624 ths
3862 13eb76e0 bellard
    while (len > 0) {
3863 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3864 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3865 13eb76e0 bellard
        if (l > len)
3866 13eb76e0 bellard
            l = len;
3867 92e873b9 bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3868 13eb76e0 bellard
        if (!p) {
3869 13eb76e0 bellard
            pd = IO_MEM_UNASSIGNED;
3870 13eb76e0 bellard
        } else {
3871 13eb76e0 bellard
            pd = p->phys_offset;
3872 13eb76e0 bellard
        }
3873 3b46e624 ths
3874 13eb76e0 bellard
        if (is_write) {
3875 3a7d929e bellard
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3876 c227f099 Anthony Liguori
                target_phys_addr_t addr1 = addr;
3877 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3878 8da3ff18 pbrook
                if (p)
3879 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3880 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
3881 6a00d601 bellard
                   potential bugs */
3882 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3883 1c213d19 bellard
                    /* 32 bit write access */
3884 c27004ec bellard
                    val = ldl_p(buf);
3885 6c2934db aurel32
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3886 13eb76e0 bellard
                    l = 4;
3887 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3888 1c213d19 bellard
                    /* 16 bit write access */
3889 c27004ec bellard
                    val = lduw_p(buf);
3890 6c2934db aurel32
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3891 13eb76e0 bellard
                    l = 2;
3892 13eb76e0 bellard
                } else {
3893 1c213d19 bellard
                    /* 8 bit write access */
3894 c27004ec bellard
                    val = ldub_p(buf);
3895 6c2934db aurel32
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3896 13eb76e0 bellard
                    l = 1;
3897 13eb76e0 bellard
                }
3898 13eb76e0 bellard
            } else {
3899 b448f2f3 bellard
                unsigned long addr1;
3900 b448f2f3 bellard
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3901 13eb76e0 bellard
                /* RAM case */
3902 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
3903 13eb76e0 bellard
                memcpy(ptr, buf, l);
3904 3a7d929e bellard
                if (!cpu_physical_memory_is_dirty(addr1)) {
3905 3a7d929e bellard
                    /* invalidate code */
3906 3a7d929e bellard
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3907 3a7d929e bellard
                    /* set dirty bit */
3908 f7c11b53 Yoshiaki Tamura
                    cpu_physical_memory_set_dirty_flags(
3909 f7c11b53 Yoshiaki Tamura
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
3910 3a7d929e bellard
                }
3911 050a0ddf Anthony PERARD
                qemu_put_ram_ptr(ptr);
3912 13eb76e0 bellard
            }
3913 13eb76e0 bellard
        } else {
3914 5fafdf24 ths
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3915 2a4188a3 bellard
                !(pd & IO_MEM_ROMD)) {
3916 c227f099 Anthony Liguori
                target_phys_addr_t addr1 = addr;
3917 13eb76e0 bellard
                /* I/O case */
3918 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3919 8da3ff18 pbrook
                if (p)
3920 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3921 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3922 13eb76e0 bellard
                    /* 32 bit read access */
3923 6c2934db aurel32
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3924 c27004ec bellard
                    stl_p(buf, val);
3925 13eb76e0 bellard
                    l = 4;
3926 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3927 13eb76e0 bellard
                    /* 16 bit read access */
3928 6c2934db aurel32
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3929 c27004ec bellard
                    stw_p(buf, val);
3930 13eb76e0 bellard
                    l = 2;
3931 13eb76e0 bellard
                } else {
3932 1c213d19 bellard
                    /* 8 bit read access */
3933 6c2934db aurel32
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3934 c27004ec bellard
                    stb_p(buf, val);
3935 13eb76e0 bellard
                    l = 1;
3936 13eb76e0 bellard
                }
3937 13eb76e0 bellard
            } else {
3938 13eb76e0 bellard
                /* RAM case */
3939 050a0ddf Anthony PERARD
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3940 050a0ddf Anthony PERARD
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3941 050a0ddf Anthony PERARD
                qemu_put_ram_ptr(ptr);
3942 13eb76e0 bellard
            }
3943 13eb76e0 bellard
        }
3944 13eb76e0 bellard
        len -= l;
3945 13eb76e0 bellard
        buf += l;
3946 13eb76e0 bellard
        addr += l;
3947 13eb76e0 bellard
    }
3948 13eb76e0 bellard
}
3949 8df1cd07 bellard
3950 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
3951 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3952 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3953 d0ecd2aa bellard
{
3954 d0ecd2aa bellard
    int l;
3955 d0ecd2aa bellard
    uint8_t *ptr;
3956 c227f099 Anthony Liguori
    target_phys_addr_t page;
3957 d0ecd2aa bellard
    unsigned long pd;
3958 d0ecd2aa bellard
    PhysPageDesc *p;
3959 3b46e624 ths
3960 d0ecd2aa bellard
    while (len > 0) {
3961 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3962 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3963 d0ecd2aa bellard
        if (l > len)
3964 d0ecd2aa bellard
            l = len;
3965 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3966 d0ecd2aa bellard
        if (!p) {
3967 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3968 d0ecd2aa bellard
        } else {
3969 d0ecd2aa bellard
            pd = p->phys_offset;
3970 d0ecd2aa bellard
        }
3971 3b46e624 ths
3972 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3973 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3974 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3975 d0ecd2aa bellard
            /* do nothing */
3976 d0ecd2aa bellard
        } else {
3977 d0ecd2aa bellard
            unsigned long addr1;
3978 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3979 d0ecd2aa bellard
            /* ROM/RAM case */
3980 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3981 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3982 050a0ddf Anthony PERARD
            qemu_put_ram_ptr(ptr);
3983 d0ecd2aa bellard
        }
3984 d0ecd2aa bellard
        len -= l;
3985 d0ecd2aa bellard
        buf += l;
3986 d0ecd2aa bellard
        addr += l;
3987 d0ecd2aa bellard
    }
3988 d0ecd2aa bellard
}
3989 d0ecd2aa bellard
3990 6d16c2f8 aliguori
typedef struct {
3991 6d16c2f8 aliguori
    void *buffer;
3992 c227f099 Anthony Liguori
    target_phys_addr_t addr;
3993 c227f099 Anthony Liguori
    target_phys_addr_t len;
3994 6d16c2f8 aliguori
} BounceBuffer;
3995 6d16c2f8 aliguori
3996 6d16c2f8 aliguori
static BounceBuffer bounce;
3997 6d16c2f8 aliguori
3998 ba223c29 aliguori
typedef struct MapClient {
3999 ba223c29 aliguori
    void *opaque;
4000 ba223c29 aliguori
    void (*callback)(void *opaque);
4001 72cf2d4f Blue Swirl
    QLIST_ENTRY(MapClient) link;
4002 ba223c29 aliguori
} MapClient;
4003 ba223c29 aliguori
4004 72cf2d4f Blue Swirl
static QLIST_HEAD(map_client_list, MapClient) map_client_list
4005 72cf2d4f Blue Swirl
    = QLIST_HEAD_INITIALIZER(map_client_list);
4006 ba223c29 aliguori
4007 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4008 ba223c29 aliguori
{
4009 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
4010 ba223c29 aliguori
4011 ba223c29 aliguori
    client->opaque = opaque;
4012 ba223c29 aliguori
    client->callback = callback;
4013 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
4014 ba223c29 aliguori
    return client;
4015 ba223c29 aliguori
}
4016 ba223c29 aliguori
4017 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
4018 ba223c29 aliguori
{
4019 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
4020 ba223c29 aliguori
4021 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
4022 34d5e948 Isaku Yamahata
    qemu_free(client);
4023 ba223c29 aliguori
}
4024 ba223c29 aliguori
4025 ba223c29 aliguori
static void cpu_notify_map_clients(void)
4026 ba223c29 aliguori
{
4027 ba223c29 aliguori
    MapClient *client;
4028 ba223c29 aliguori
4029 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
4030 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
4031 ba223c29 aliguori
        client->callback(client->opaque);
4032 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
4033 ba223c29 aliguori
    }
4034 ba223c29 aliguori
}
4035 ba223c29 aliguori
4036 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
4037 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
4038 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
4039 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
4040 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
4041 ba223c29 aliguori
 * likely to succeed.
4042 6d16c2f8 aliguori
 */
4043 c227f099 Anthony Liguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
4044 c227f099 Anthony Liguori
                              target_phys_addr_t *plen,
4045 6d16c2f8 aliguori
                              int is_write)
4046 6d16c2f8 aliguori
{
4047 c227f099 Anthony Liguori
    target_phys_addr_t len = *plen;
4048 38bee5dc Stefano Stabellini
    target_phys_addr_t todo = 0;
4049 6d16c2f8 aliguori
    int l;
4050 c227f099 Anthony Liguori
    target_phys_addr_t page;
4051 6d16c2f8 aliguori
    unsigned long pd;
4052 6d16c2f8 aliguori
    PhysPageDesc *p;
4053 38bee5dc Stefano Stabellini
    target_phys_addr_t addr1 = addr;
4054 6d16c2f8 aliguori
4055 6d16c2f8 aliguori
    while (len > 0) {
4056 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
4057 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
4058 6d16c2f8 aliguori
        if (l > len)
4059 6d16c2f8 aliguori
            l = len;
4060 6d16c2f8 aliguori
        p = phys_page_find(page >> TARGET_PAGE_BITS);
4061 6d16c2f8 aliguori
        if (!p) {
4062 6d16c2f8 aliguori
            pd = IO_MEM_UNASSIGNED;
4063 6d16c2f8 aliguori
        } else {
4064 6d16c2f8 aliguori
            pd = p->phys_offset;
4065 6d16c2f8 aliguori
        }
4066 6d16c2f8 aliguori
4067 6d16c2f8 aliguori
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4068 38bee5dc Stefano Stabellini
            if (todo || bounce.buffer) {
4069 6d16c2f8 aliguori
                break;
4070 6d16c2f8 aliguori
            }
4071 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4072 6d16c2f8 aliguori
            bounce.addr = addr;
4073 6d16c2f8 aliguori
            bounce.len = l;
4074 6d16c2f8 aliguori
            if (!is_write) {
4075 54f7b4a3 Stefan Weil
                cpu_physical_memory_read(addr, bounce.buffer, l);
4076 6d16c2f8 aliguori
            }
4077 38bee5dc Stefano Stabellini
4078 38bee5dc Stefano Stabellini
            *plen = l;
4079 38bee5dc Stefano Stabellini
            return bounce.buffer;
4080 6d16c2f8 aliguori
        }
4081 6d16c2f8 aliguori
4082 6d16c2f8 aliguori
        len -= l;
4083 6d16c2f8 aliguori
        addr += l;
4084 38bee5dc Stefano Stabellini
        todo += l;
4085 6d16c2f8 aliguori
    }
4086 38bee5dc Stefano Stabellini
    *plen = todo;
4087 38bee5dc Stefano Stabellini
    return qemu_ram_ptr_length(addr1, plen);
4088 6d16c2f8 aliguori
}
4089 6d16c2f8 aliguori
4090 6d16c2f8 aliguori
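
/* Sketch (hedged usage, assumes this file's API): the intended calling
 * pattern for the map/unmap pair is map, honor the possibly shortened
 * *plen, transfer, then unmap with the byte count actually used so dirty
 * tracking stays accurate. do_dma_read() is a placeholder: */

static void dma_to_device(target_phys_addr_t addr, target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *p = cpu_physical_memory_map(addr, &plen, 0 /* is_write */);
        if (!p) {
            /* resources exhausted: a real caller would register a map
             * client with cpu_register_map_client() and retry later */
            break;
        }
        /* do_dma_read(p, plen);  -- device-side transfer goes here */
        cpu_physical_memory_unmap(p, plen, 0, plen);
        addr += plen;
        len -= plen;
    }
}
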
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4091 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
4092 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
4093 6d16c2f8 aliguori
 */
4094 c227f099 Anthony Liguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4095 c227f099 Anthony Liguori
                               int is_write, target_phys_addr_t access_len)
4096 6d16c2f8 aliguori
{
4097 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
4098 6d16c2f8 aliguori
        if (is_write) {
4099 e890261f Marcelo Tosatti
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
4100 6d16c2f8 aliguori
            while (access_len) {
4101 6d16c2f8 aliguori
                unsigned l;
4102 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
4103 6d16c2f8 aliguori
                if (l > access_len)
4104 6d16c2f8 aliguori
                    l = access_len;
4105 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
4106 6d16c2f8 aliguori
                    /* invalidate code */
4107 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4108 6d16c2f8 aliguori
                    /* set dirty bit */
4109 f7c11b53 Yoshiaki Tamura
                    cpu_physical_memory_set_dirty_flags(
4110 f7c11b53 Yoshiaki Tamura
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
4111 6d16c2f8 aliguori
                }
4112 6d16c2f8 aliguori
                addr1 += l;
4113 6d16c2f8 aliguori
                access_len -= l;
4114 6d16c2f8 aliguori
            }
4115 6d16c2f8 aliguori
        }
4116 050a0ddf Anthony PERARD
        if (xen_mapcache_enabled()) {
4117 712c2b41 Stefano Stabellini
            qemu_invalidate_entry(buffer);
4118 050a0ddf Anthony PERARD
        }
4119 6d16c2f8 aliguori
        return;
4120 6d16c2f8 aliguori
    }
4121 6d16c2f8 aliguori
    if (is_write) {
4122 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4123 6d16c2f8 aliguori
    }
4124 f8a83245 Herve Poussineau
    qemu_vfree(bounce.buffer);
4125 6d16c2f8 aliguori
    bounce.buffer = NULL;
4126 ba223c29 aliguori
    cpu_notify_map_clients();
4127 6d16c2f8 aliguori
}
4128 d0ecd2aa bellard
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

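/*
 * Illustrative sketch, not from exec.c itself: ldl_phys_internal swaps
 * the value read from an I/O region only when the requested device
 * endianness disagrees with the guest's (TARGET_WORDS_BIGENDIAN).  The
 * standalone program below makes the same decision, using the host's
 * byte order, probed at run time, as a stand-in for the guest's; the
 * demo_ names are assumptions made for the example.
 */
#include <stdint.h>
#include <stdio.h>

enum demo_endian { DEMO_LITTLE_ENDIAN, DEMO_BIG_ENDIAN };

static uint32_t demo_bswap32(uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
           ((v << 8) & 0x00ff0000u) | (v << 24);
}

static int demo_host_is_big_endian(void)
{
    const uint16_t probe = 0x0102;
    return *(const uint8_t *)&probe == 0x01;
}

int main(void)
{
    uint32_t val = 0x11223344;          /* value as the device returned it */
    enum demo_endian device = DEMO_LITTLE_ENDIAN;

    if (demo_host_is_big_endian() ? device == DEMO_LITTLE_ENDIAN
                                  : device == DEMO_BIG_ENDIAN) {
        val = demo_bswap32(val);        /* endianness mismatch: swap */
    }
    printf("val=%#x\n", val);
    return 0;
}
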
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

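/*
 * Illustrative sketch, not from exec.c itself: in the I/O case above,
 * ldq_phys_internal issues two 32-bit reads and places them in the high
 * or low half depending on TARGET_WORDS_BIGENDIAN.  This standalone
 * program composes a 64-bit value the same way from a hypothetical pair
 * of word-sized device reads (demo_io_read32 is invented for the demo).
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_io_read32(uint32_t addr)
{
    /* pretend device register file: one word at addr, one at addr + 4 */
    return (addr & 4) ? 0xdeadbeefu : 0xcafef00du;
}

static uint64_t demo_ldq(uint32_t addr, int big_endian)
{
    uint64_t val;

    if (big_endian) {
        val = (uint64_t)demo_io_read32(addr) << 32;
        val |= demo_io_read32(addr + 4);
    } else {
        val = demo_io_read32(addr);
        val |= (uint64_t)demo_io_read32(addr + 4) << 32;
    }
    return val;
}

int main(void)
{
    printf("le=%#llx be=%#llx\n",
           (unsigned long long)demo_ldq(0, 0),
           (unsigned long long)demo_ldq(0, 1));
    return 0;
}
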
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

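/*
 * Illustrative sketch, not from exec.c itself: in the RAM case the
 * lduw/ldl helpers above dispatch to an explicit-endian load such as
 * lduw_le_p or lduw_be_p.  Open-coded byte composition, as below, is one
 * portable way such loads can be written; the demo_ names are
 * assumptions made for the example.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t demo_lduw_le(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));     /* low byte first */
}

static uint16_t demo_lduw_be(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);     /* high byte first */
}

int main(void)
{
    static const uint8_t ram[2] = { 0x34, 0x12 };

    printf("le=%#x be=%#x\n", demo_lduw_le(ram), demo_lduw_be(ram));
    return 0;
}
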
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

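/*
 * Illustrative sketch, not from exec.c itself: the store paths above OR
 * the mask (0xff & ~CODE_DIRTY_FLAG) into the page's dirty byte, i.e.
 * they raise every per-page dirty flag except the code flag, which is
 * left to the TB invalidation machinery.  Toy dirty byte below; the
 * DEMO_CODE_DIRTY_FLAG bit assignment is an assumption for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CODE_DIRTY_FLAG 0x02    /* hypothetical bit position */

int main(void)
{
    uint8_t dirty = 0;                         /* page starts fully clean */

    dirty |= 0xff & ~DEMO_CODE_DIRTY_FLAG;     /* what a CPU store sets */

    printf("dirty=%#x, code flag untouched=%d\n",
           dirty, !(dirty & DEMO_CODE_DIRTY_FLAG));
    return 0;
}
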
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

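/*
 * Illustrative sketch, not from exec.c itself: stq_le_phys/stq_be_phys
 * convert the value first (cpu_to_le64/cpu_to_be64) and then write the
 * eight bytes as an opaque buffer.  The standalone program below
 * open-codes the little-endian serialization step; demo_store_u64_le is
 * invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static void demo_store_u64_le(uint8_t *dst, uint64_t val)
{
    int i;

    for (i = 0; i < 8; i++)
        dst[i] = (uint8_t)(val >> (8 * i));    /* least significant first */
}

int main(void)
{
    uint8_t buf[8];
    int i;

    demo_store_u64_le(buf, 0x1122334455667788ULL);
    for (i = 0; i < 8; i++)
        printf("%02x ", buf[i]);               /* 88 77 66 55 44 33 22 11 */
    printf("\n");
    return 0;
}
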
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

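/*
 * Illustrative sketch, not from exec.c itself: cpu_memory_rw_debug
 * translates each virtual page separately and clamps every copy to the
 * end of the current page, failing as soon as a page has no mapping.
 * The toy page table below (identity-mapped under 64 KiB) and the
 * DEMO_/demo_ names are assumptions made for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

static int64_t demo_get_phys_page(uint32_t page)
{
    return page < 0x10000 ? (int64_t)page : -1;   /* -1: unmapped */
}

static int demo_rw_debug(uint32_t addr, int len)
{
    uint32_t page;
    int64_t phys;
    int l;

    while (len > 0) {
        page = addr & DEMO_PAGE_MASK;
        phys = demo_get_phys_page(page);
        if (phys == -1)
            return -1;                       /* no mapping: report error */
        l = (int)((page + DEMO_PAGE_SIZE) - addr);
        if (l > len)
            l = len;                         /* clamp to requested length */
        printf("copy %4d bytes at phys %#llx\n", l,
               (unsigned long long)(phys + (addr & ~DEMO_PAGE_MASK)));
        len -= l;
        addr += l;
    }
    return 0;
}

int main(void)
{
    return demo_rw_debug(0x0ffc, 6000);      /* 4 + 4096 + 1900 bytes */
}
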
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

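/*
 * Illustrative sketch, not from exec.c itself: dump_exec_info derives
 * its averages from per-TB sizes; the "expansion ratio", for instance,
 * is generated host code bytes divided by guest bytes translated.  The
 * toy figures below are invented for the example.
 */
#include <stdio.h>

int main(void)
{
    static const int tb_size[] = { 12, 40, 7, 25 }; /* guest bytes per TB */
    const int nb_tbs = 4;
    const long host_bytes = 1024;    /* stands in for code_gen_ptr delta */
    int i, target_code_size = 0, max_size = 0;

    for (i = 0; i < nb_tbs; i++) {
        target_code_size += tb_size[i];
        if (tb_size[i] > max_size)
            max_size = tb_size[i];
    }
    printf("TB avg target size  %d max=%d bytes\n",
           nb_tbs ? target_code_size / nb_tbs : 0, max_size);
    printf("expansion ratio: %0.1f\n",
           target_code_size ? (double)host_bytes / target_code_size : 0);
    return 0;
}
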
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
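
/*
 * Illustrative sketch, not from exec.c itself: the block above is C's
 * "poor man's template" -- softmmu_template.h is included four times,
 * once per access size (SHIFT 0..3, i.e. 1/2/4/8 bytes), and the header
 * derives the access size and function-name suffix from SHIFT, while
 * MMUSUFFIX _cmmu gives these code-fetch variants distinct names.  The
 * same name-pasting idea in miniature, with a macro standing in for the
 * template header; all DEMO_/demo_ names are invented for the example.
 */
#include <stdio.h>

#define DEMO_GLUE_(a, b) a##b
#define DEMO_GLUE(a, b)  DEMO_GLUE_(a, b)

/* what one per-size "template instantiation" might expand to */
#define DEMO_DEFINE_ACCESS_SIZE(shift)                     \
    static unsigned DEMO_GLUE(demo_size, shift)(void)      \
    {                                                      \
        return 1u << (shift);   /* access size in bytes */ \
    }

DEMO_DEFINE_ACCESS_SIZE(0)   /* demo_size0: 1-byte accesses */
DEMO_DEFINE_ACCESS_SIZE(1)   /* demo_size1: 2-byte accesses */
DEMO_DEFINE_ACCESS_SIZE(2)   /* demo_size2: 4-byte accesses */
DEMO_DEFINE_ACCESS_SIZE(3)   /* demo_size3: 8-byte accesses */

int main(void)
{
    printf("%u %u %u %u\n",
           demo_size0(), demo_size1(), demo_size2(), demo_size3());
    return 0;
}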