/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "exec-all.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

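/* Make the host memory holding generated code executable.  The Win32
   variant flips the protection with VirtualProtect(); the POSIX
   variant rounds [addr, addr + size) out to host page boundaries
   before calling mprotect(). */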
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

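/* Determine the host page size and derive the qemu_host_page_*
   globals from it.  In BSD user mode, additionally walk the host
   mappings that already exist (via kinfo_getvmmap() or the
   Linux-compat maps file) and mark them PAGE_RESERVED. */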
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

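/* Look up the PageDesc for the given page index in the multi-level
   l1_map.  If 'alloc' is set, missing intermediate tables and the
   final PageDesc array are allocated on the way down; otherwise NULL
   is returned for unmapped entries. */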
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
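/* Same walk as page_find_alloc(), but over l1_phys_map, keyed by
   physical page index.  Freshly allocated leaves are initialized to
   IO_MEM_UNASSIGNED so unpopulated physical pages are treated as
   unassigned memory. */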
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

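/* Allocate the buffer that TCG emits translated code into: either the
   static buffer, or an mmap()ed region placed under per-host
   constraints (e.g. MAP_32BIT on x86-64 Linux) so that direct calls
   and branches within generated code keep working. */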
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

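/* Return the CPU with the given index, or NULL if it does not exist. */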
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

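/* Append 'env' to the global CPU list, assign it the next free
   cpu_index and register its common state for savevm/migration. */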
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

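/* Free the SMC code bitmap of a page and reset its write counter. */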
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

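/* Unlink jump entry 'n' of 'tb' from the circular list of TBs that
   jump to the same target.  The low two bits of each list pointer
   encode which jump slot of the pointing TB is followed next; the
   value 2 marks the list head. */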
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

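/* Remove 'tb' from the physical hash table, from the page lists of
   the page(s) it covers and from every CPU's tb_jmp_cache, then
   redirect all TBs that chain into it back to their own code so it
   can never be entered again. */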
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

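/* Set bits [start, start + len) in the bitmap 'tab' (bit 0 is the
   least significant bit of tab[0]). */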
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

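/* Build the bitmap marking which bytes of the page are covered by
   translated code; tb_invalidate_phys_page_fast() consults it to skip
   writes that cannot intersect any TB. */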
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

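/* Translate the guest code block at (pc, cs_base, flags) and register
   the resulting TB.  If the TB allocator is full, the whole
   translation cache is flushed first and the allocation retried, so
   this cannot fail. */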
TranslationBlock *tb_gen_code(CPUState *env,
968 2e70f6ef pbrook
                              target_ulong pc, target_ulong cs_base,
969 2e70f6ef pbrook
                              int flags, int cflags)
970 d720b93d bellard
{
971 d720b93d bellard
    TranslationBlock *tb;
972 d720b93d bellard
    uint8_t *tc_ptr;
973 41c1b1c9 Paul Brook
    tb_page_addr_t phys_pc, phys_page2;
974 41c1b1c9 Paul Brook
    target_ulong virt_page2;
975 d720b93d bellard
    int code_gen_size;
976 d720b93d bellard
977 41c1b1c9 Paul Brook
    phys_pc = get_page_addr_code(env, pc);
978 c27004ec bellard
    tb = tb_alloc(pc);
979 d720b93d bellard
    if (!tb) {
980 d720b93d bellard
        /* flush must be done */
981 d720b93d bellard
        tb_flush(env);
982 d720b93d bellard
        /* cannot fail at this point */
983 c27004ec bellard
        tb = tb_alloc(pc);
984 2e70f6ef pbrook
        /* Don't forget to invalidate previous TB info.  */
985 2e70f6ef pbrook
        tb_invalidated_flag = 1;
986 d720b93d bellard
    }
987 d720b93d bellard
    tc_ptr = code_gen_ptr;
988 d720b93d bellard
    tb->tc_ptr = tc_ptr;
989 d720b93d bellard
    tb->cs_base = cs_base;
990 d720b93d bellard
    tb->flags = flags;
991 d720b93d bellard
    tb->cflags = cflags;
992 d07bde88 blueswir1
    cpu_gen_code(env, tb, &code_gen_size);
993 d720b93d bellard
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
994 3b46e624 ths
995 d720b93d bellard
    /* check next page if needed */
996 c27004ec bellard
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
997 d720b93d bellard
    phys_page2 = -1;
998 c27004ec bellard
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
999 41c1b1c9 Paul Brook
        phys_page2 = get_page_addr_code(env, virt_page2);
1000 d720b93d bellard
    }
1001 41c1b1c9 Paul Brook
    tb_link_page(tb, phys_pc, phys_page2);
1002 2e70f6ef pbrook
    return tb;
1003 d720b93d bellard
}
1004 3b46e624 ths
1005 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
1006 9fa3e853 bellard
   starting in range [start;end[. NOTE: start and end must refer to
1007 d720b93d bellard
   the same physical page. 'is_cpu_write_access' should be true if called
1008 d720b93d bellard
   from a real cpu write access: the virtual CPU will exit the current
1009 d720b93d bellard
   TB if code is modified inside this TB. */
1010 41c1b1c9 Paul Brook
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1011 d720b93d bellard
                                   int is_cpu_write_access)
1012 d720b93d bellard
{
1013 6b917547 aliguori
    TranslationBlock *tb, *tb_next, *saved_tb;
1014 d720b93d bellard
    CPUState *env = cpu_single_env;
1015 41c1b1c9 Paul Brook
    tb_page_addr_t tb_start, tb_end;
1016 6b917547 aliguori
    PageDesc *p;
1017 6b917547 aliguori
    int n;
1018 6b917547 aliguori
#ifdef TARGET_HAS_PRECISE_SMC
1019 6b917547 aliguori
    int current_tb_not_found = is_cpu_write_access;
1020 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1021 6b917547 aliguori
    int current_tb_modified = 0;
1022 6b917547 aliguori
    target_ulong current_pc = 0;
1023 6b917547 aliguori
    target_ulong current_cs_base = 0;
1024 6b917547 aliguori
    int current_flags = 0;
1025 6b917547 aliguori
#endif /* TARGET_HAS_PRECISE_SMC */
1026 9fa3e853 bellard
1027 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1028 5fafdf24 ths
    if (!p)
1029 9fa3e853 bellard
        return;
1030 5fafdf24 ths
    if (!p->code_bitmap &&
1031 d720b93d bellard
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1032 d720b93d bellard
        is_cpu_write_access) {
1033 9fa3e853 bellard
        /* build code bitmap */
1034 9fa3e853 bellard
        build_page_bitmap(p);
1035 9fa3e853 bellard
    }
1036 9fa3e853 bellard
1037 9fa3e853 bellard
    /* we remove all the TBs in the range [start, end[ */
1038 9fa3e853 bellard
    /* XXX: see if in some cases it could be faster to invalidate all the code */
1039 9fa3e853 bellard
    tb = p->first_tb;
1040 9fa3e853 bellard
    while (tb != NULL) {
1041 9fa3e853 bellard
        n = (long)tb & 3;
1042 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1043 9fa3e853 bellard
        tb_next = tb->page_next[n];
1044 9fa3e853 bellard
        /* NOTE: this is subtle as a TB may span two physical pages */
1045 9fa3e853 bellard
        if (n == 0) {
1046 9fa3e853 bellard
            /* NOTE: tb_end may be after the end of the page, but
1047 9fa3e853 bellard
               it is not a problem */
1048 9fa3e853 bellard
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1049 9fa3e853 bellard
            tb_end = tb_start + tb->size;
1050 9fa3e853 bellard
        } else {
1051 9fa3e853 bellard
            tb_start = tb->page_addr[1];
1052 9fa3e853 bellard
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1053 9fa3e853 bellard
        }
1054 9fa3e853 bellard
        if (!(tb_end <= start || tb_start >= end)) {
1055 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1056 d720b93d bellard
            if (current_tb_not_found) {
1057 d720b93d bellard
                current_tb_not_found = 0;
1058 d720b93d bellard
                current_tb = NULL;
1059 2e70f6ef pbrook
                if (env->mem_io_pc) {
1060 d720b93d bellard
                    /* now we have a real cpu fault */
1061 2e70f6ef pbrook
                    current_tb = tb_find_pc(env->mem_io_pc);
1062 d720b93d bellard
                }
1063 d720b93d bellard
            }
1064 d720b93d bellard
            if (current_tb == tb &&
1065 2e70f6ef pbrook
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
1066 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1067 d720b93d bellard
                its execution. We could be more precise by checking
1068 d720b93d bellard
                that the modification is after the current PC, but it
1069 d720b93d bellard
                would require a specialized function to partially
1070 d720b93d bellard
                restore the CPU state */
1071 3b46e624 ths
1072 d720b93d bellard
                current_tb_modified = 1;
1073 618ba8e6 Stefan Weil
                cpu_restore_state(current_tb, env, env->mem_io_pc);
1074 6b917547 aliguori
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1075 6b917547 aliguori
                                     &current_flags);
1076 d720b93d bellard
            }
1077 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1078 6f5a9f7e bellard
            /* we need to do this to handle the case where a signal
1079 6f5a9f7e bellard
               occurs while doing tb_phys_invalidate() */
1080 6f5a9f7e bellard
            saved_tb = NULL;
1081 6f5a9f7e bellard
            if (env) {
1082 6f5a9f7e bellard
                saved_tb = env->current_tb;
1083 6f5a9f7e bellard
                env->current_tb = NULL;
1084 6f5a9f7e bellard
            }
1085 9fa3e853 bellard
            tb_phys_invalidate(tb, -1);
1086 6f5a9f7e bellard
            if (env) {
1087 6f5a9f7e bellard
                env->current_tb = saved_tb;
1088 6f5a9f7e bellard
                if (env->interrupt_request && env->current_tb)
1089 6f5a9f7e bellard
                    cpu_interrupt(env, env->interrupt_request);
1090 6f5a9f7e bellard
            }
1091 9fa3e853 bellard
        }
1092 9fa3e853 bellard
        tb = tb_next;
1093 9fa3e853 bellard
    }
1094 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
1095 9fa3e853 bellard
    /* if no code remaining, no need to continue to use slow writes */
1096 9fa3e853 bellard
    if (!p->first_tb) {
1097 9fa3e853 bellard
        invalidate_page_bitmap(p);
1098 d720b93d bellard
        if (is_cpu_write_access) {
1099 2e70f6ef pbrook
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1100 d720b93d bellard
        }
1101 d720b93d bellard
    }
1102 d720b93d bellard
#endif
1103 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1104 d720b93d bellard
    if (current_tb_modified) {
1105 d720b93d bellard
        /* we generate a block containing just the instruction
1106 d720b93d bellard
           modifying the memory. This ensures that the new block
1107 d720b93d bellard
           cannot modify itself */
1108 ea1c1802 bellard
        env->current_tb = NULL;
1109 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1110 d720b93d bellard
        cpu_resume_from_signal(env, NULL);
1111 9fa3e853 bellard
    }
1112 fd6ce8f6 bellard
#endif
1113 9fa3e853 bellard
}
1114 fd6ce8f6 bellard
1115 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
1116 41c1b1c9 Paul Brook
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1117 9fa3e853 bellard
{
1118 9fa3e853 bellard
    PageDesc *p;
1119 9fa3e853 bellard
    int offset, b;
1120 59817ccb bellard
#if 0
1121 a4193c8a bellard
    if (1) {
1122 93fcfe39 aliguori
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1123 93fcfe39 aliguori
                  cpu_single_env->mem_io_vaddr, len,
1124 93fcfe39 aliguori
                  cpu_single_env->eip,
1125 93fcfe39 aliguori
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1126 59817ccb bellard
    }
1127 59817ccb bellard
#endif
1128 9fa3e853 bellard
    p = page_find(start >> TARGET_PAGE_BITS);
1129 5fafdf24 ths
    if (!p)
1130 9fa3e853 bellard
        return;
1131 9fa3e853 bellard
    if (p->code_bitmap) {
1132 9fa3e853 bellard
        offset = start & ~TARGET_PAGE_MASK;
1133 9fa3e853 bellard
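        /* the code bitmap holds one bit per byte of the page; check
           whether any of the len bytes being written overlap translated
           code */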
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1134 9fa3e853 bellard
        if (b & ((1 << len) - 1))
1135 9fa3e853 bellard
            goto do_invalidate;
1136 9fa3e853 bellard
    } else {
1137 9fa3e853 bellard
    do_invalidate:
1138 d720b93d bellard
        tb_invalidate_phys_page_range(start, start + len, 1);
1139 9fa3e853 bellard
    }
1140 9fa3e853 bellard
}
1141 9fa3e853 bellard
1142 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1143 41c1b1c9 Paul Brook
static void tb_invalidate_phys_page(tb_page_addr_t addr,
1144 d720b93d bellard
                                    unsigned long pc, void *puc)
1145 9fa3e853 bellard
{
1146 6b917547 aliguori
    TranslationBlock *tb;
1147 9fa3e853 bellard
    PageDesc *p;
1148 6b917547 aliguori
    int n;
1149 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1150 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1151 d720b93d bellard
    CPUState *env = cpu_single_env;
1152 6b917547 aliguori
    int current_tb_modified = 0;
1153 6b917547 aliguori
    target_ulong current_pc = 0;
1154 6b917547 aliguori
    target_ulong current_cs_base = 0;
1155 6b917547 aliguori
    int current_flags = 0;
1156 d720b93d bellard
#endif
1157 9fa3e853 bellard
1158 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1159 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1160 5fafdf24 ths
    if (!p)
1161 9fa3e853 bellard
        return;
1162 9fa3e853 bellard
    tb = p->first_tb;
1163 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1164 d720b93d bellard
    if (tb && pc != 0) {
1165 d720b93d bellard
        current_tb = tb_find_pc(pc);
1166 d720b93d bellard
    }
1167 d720b93d bellard
#endif
1168 9fa3e853 bellard
    while (tb != NULL) {
1169 9fa3e853 bellard
        n = (long)tb & 3;
1170 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1171 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1172 d720b93d bellard
        if (current_tb == tb &&
1173 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1174 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1175 d720b93d bellard
                   its execution. We could be more precise by checking
1176 d720b93d bellard
                   that the modification is after the current PC, but it
1177 d720b93d bellard
                   would require a specialized function to partially
1178 d720b93d bellard
                   restore the CPU state */
1179 3b46e624 ths
1180 d720b93d bellard
            current_tb_modified = 1;
1181 618ba8e6 Stefan Weil
            cpu_restore_state(current_tb, env, pc);
1182 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1183 6b917547 aliguori
                                 &current_flags);
1184 d720b93d bellard
        }
1185 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1186 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1187 9fa3e853 bellard
        tb = tb->page_next[n];
1188 9fa3e853 bellard
    }
1189 fd6ce8f6 bellard
    p->first_tb = NULL;
1190 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1191 d720b93d bellard
    if (current_tb_modified) {
1192 d720b93d bellard
        /* we generate a block containing just the instruction
1193 d720b93d bellard
           modifying the memory. This ensures that the new block
1194 d720b93d bellard
           cannot modify itself */
1195 ea1c1802 bellard
        env->current_tb = NULL;
1196 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1197 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1198 d720b93d bellard
    }
1199 d720b93d bellard
#endif
1200 fd6ce8f6 bellard
}
1201 9fa3e853 bellard
#endif
1202 fd6ce8f6 bellard
1203 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
1204 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1205 41c1b1c9 Paul Brook
                                 unsigned int n, tb_page_addr_t page_addr)
1206 fd6ce8f6 bellard
{
1207 fd6ce8f6 bellard
    PageDesc *p;
1208 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1209 9fa3e853 bellard
1210 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1211 5cd2c5b6 Richard Henderson
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1212 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1213 9fa3e853 bellard
    last_first_tb = p->first_tb;
1214 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1215 9fa3e853 bellard
    invalidate_page_bitmap(p);
1216 fd6ce8f6 bellard
1217 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1218 d720b93d bellard
1219 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1220 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1221 53a5960a pbrook
        target_ulong addr;
1222 53a5960a pbrook
        PageDesc *p2;
1223 9fa3e853 bellard
        int prot;
1224 9fa3e853 bellard
1225 fd6ce8f6 bellard
        /* force the host page as non writable (writes will have a
1226 fd6ce8f6 bellard
           page fault + mprotect overhead) */
1227 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1228 fd6ce8f6 bellard
        prot = 0;
1229 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1230 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1231 53a5960a pbrook
1232 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1233 53a5960a pbrook
            if (!p2)
1234 53a5960a pbrook
                continue;
1235 53a5960a pbrook
            prot |= p2->flags;
1236 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1237 53a5960a pbrook
        }
1238 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1239 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1240 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1241 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1242 53a5960a pbrook
               page_addr);
1243 fd6ce8f6 bellard
#endif
1244 fd6ce8f6 bellard
    }
1245 9fa3e853 bellard
#else
1246 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1247 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1248 9fa3e853 bellard
       allocated in a physical page */
1249 9fa3e853 bellard
    if (!last_first_tb) {
1250 6a00d601 bellard
        tlb_protect_code(page_addr);
1251 9fa3e853 bellard
    }
1252 9fa3e853 bellard
#endif
1253 d720b93d bellard
1254 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1255 fd6ce8f6 bellard
}
1256 fd6ce8f6 bellard
1257 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1258 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1259 41c1b1c9 Paul Brook
void tb_link_page(TranslationBlock *tb,
1260 41c1b1c9 Paul Brook
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1261 d4e8164f bellard
{
1262 9fa3e853 bellard
    unsigned int h;
1263 9fa3e853 bellard
    TranslationBlock **ptb;
1264 9fa3e853 bellard
1265 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1266 c8a706fe pbrook
       before we are done.  */
1267 c8a706fe pbrook
    mmap_lock();
1268 9fa3e853 bellard
    /* add in the physical hash table */
1269 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1270 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1271 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1272 9fa3e853 bellard
    *ptb = tb;
1273 fd6ce8f6 bellard
1274 fd6ce8f6 bellard
    /* add in the page list */
1275 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1276 9fa3e853 bellard
    if (phys_page2 != -1)
1277 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1278 9fa3e853 bellard
    else
1279 9fa3e853 bellard
        tb->page_addr[1] = -1;
1280 9fa3e853 bellard
1281 d4e8164f bellard
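    /* the tag value 2 in the low bits marks the head of the circular
       list of TBs that jump into this one */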
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1282 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1283 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1284 d4e8164f bellard
1285 d4e8164f bellard
    /* init original jump addresses */
1286 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1287 d4e8164f bellard
        tb_reset_jump(tb, 0);
1288 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1289 d4e8164f bellard
        tb_reset_jump(tb, 1);
1290 8a40a180 bellard
1291 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1292 8a40a180 bellard
    tb_page_check();
1293 8a40a180 bellard
#endif
1294 c8a706fe pbrook
    mmap_unlock();
1295 fd6ce8f6 bellard
}
1296 fd6ce8f6 bellard
1297 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1298 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1299 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1300 fd6ce8f6 bellard
{
1301 9fa3e853 bellard
    int m_min, m_max, m;
1302 9fa3e853 bellard
    unsigned long v;
1303 9fa3e853 bellard
    TranslationBlock *tb;
1304 a513fe19 bellard
1305 a513fe19 bellard
    if (nb_tbs <= 0)
1306 a513fe19 bellard
        return NULL;
1307 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1308 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1309 a513fe19 bellard
        return NULL;
1310 a513fe19 bellard
    /* binary search (cf Knuth) */
1311 a513fe19 bellard
    m_min = 0;
1312 a513fe19 bellard
    m_max = nb_tbs - 1;
1313 a513fe19 bellard
    while (m_min <= m_max) {
1314 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1315 a513fe19 bellard
        tb = &tbs[m];
1316 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1317 a513fe19 bellard
        if (v == tc_ptr)
1318 a513fe19 bellard
            return tb;
1319 a513fe19 bellard
        else if (tc_ptr < v) {
1320 a513fe19 bellard
            m_max = m - 1;
1321 a513fe19 bellard
        } else {
1322 a513fe19 bellard
            m_min = m + 1;
1323 a513fe19 bellard
        }
1324 5fafdf24 ths
    }
1325 a513fe19 bellard
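    /* no exact match: tc_ptr lies within the translated code of
       tbs[m_max], the last TB starting below it */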
    return &tbs[m_max];
1326 a513fe19 bellard
}
1327 7501267e bellard
1328 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1329 ea041c0e bellard
1330 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1331 ea041c0e bellard
{
1332 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1333 ea041c0e bellard
    unsigned int n1;
1334 ea041c0e bellard
1335 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1336 ea041c0e bellard
    if (tb1 != NULL) {
1337 ea041c0e bellard
        /* find head of list */
1338 ea041c0e bellard
        for(;;) {
1339 ea041c0e bellard
            n1 = (long)tb1 & 3;
1340 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1341 ea041c0e bellard
            if (n1 == 2)
1342 ea041c0e bellard
                break;
1343 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1344 ea041c0e bellard
        }
1345 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1346 ea041c0e bellard
        tb_next = tb1;
1347 ea041c0e bellard
1348 ea041c0e bellard
        /* remove tb from the jmp_first list */
1349 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1350 ea041c0e bellard
        for(;;) {
1351 ea041c0e bellard
            tb1 = *ptb;
1352 ea041c0e bellard
            n1 = (long)tb1 & 3;
1353 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1354 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1355 ea041c0e bellard
                break;
1356 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1357 ea041c0e bellard
        }
1358 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1359 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1360 3b46e624 ths
1361 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1362 ea041c0e bellard
        tb_reset_jump(tb, n);
1363 ea041c0e bellard
1364 0124311e bellard
        /* suppress jumps in the tb on which we could have jumped */
1365 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1366 ea041c0e bellard
    }
1367 ea041c0e bellard
}
1368 ea041c0e bellard
1369 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1370 ea041c0e bellard
{
1371 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1372 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1373 ea041c0e bellard
}
1374 ea041c0e bellard
1375 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1376 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1377 94df27fd Paul Brook
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1378 94df27fd Paul Brook
{
1379 94df27fd Paul Brook
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1380 94df27fd Paul Brook
}
1381 94df27fd Paul Brook
#else
1382 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1383 d720b93d bellard
{
1384 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1385 9b3c35e0 j_mayer
    target_ulong pd;
1386 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1387 c2f07f81 pbrook
    PhysPageDesc *p;
1388 d720b93d bellard
1389 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1390 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1391 c2f07f81 pbrook
    if (!p) {
1392 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1393 c2f07f81 pbrook
    } else {
1394 c2f07f81 pbrook
        pd = p->phys_offset;
1395 c2f07f81 pbrook
    }
1396 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1397 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1398 d720b93d bellard
}
1399 c27004ec bellard
#endif
1400 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1401 d720b93d bellard
1402 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
1403 c527ee8f Paul Brook
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1404 c527ee8f Paul Brook
{
1406 c527ee8f Paul Brook
}
1407 c527ee8f Paul Brook
1408 c527ee8f Paul Brook
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1409 c527ee8f Paul Brook
                          int flags, CPUWatchpoint **watchpoint)
1410 c527ee8f Paul Brook
{
1411 c527ee8f Paul Brook
    return -ENOSYS;
1412 c527ee8f Paul Brook
}
1413 c527ee8f Paul Brook
#else
1414 6658ffb8 pbrook
/* Add a watchpoint.  */
1415 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1416 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1417 6658ffb8 pbrook
{
1418 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1419 c0ce998e aliguori
    CPUWatchpoint *wp;
1420 6658ffb8 pbrook
1421 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1422 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1423 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1424 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1425 b4051334 aliguori
        return -EINVAL;
1426 b4051334 aliguori
    }
1427 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1428 a1d1bb31 aliguori
1429 a1d1bb31 aliguori
    wp->vaddr = addr;
1430 b4051334 aliguori
    wp->len_mask = len_mask;
1431 a1d1bb31 aliguori
    wp->flags = flags;
1432 a1d1bb31 aliguori
1433 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1434 c0ce998e aliguori
    if (flags & BP_GDB)
1435 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1436 c0ce998e aliguori
    else
1437 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1438 6658ffb8 pbrook
1439 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1440 a1d1bb31 aliguori
1441 a1d1bb31 aliguori
    if (watchpoint)
1442 a1d1bb31 aliguori
        *watchpoint = wp;
1443 a1d1bb31 aliguori
    return 0;
1444 6658ffb8 pbrook
}
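/* Illustrative sketch only (nothing in this file uses it this way): a
   gdb-style stub could set a 4-byte write watchpoint and drop it again:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0)
           ... report the failure to the debugger ...
       ...
       cpu_watchpoint_remove_by_ref(env, wp);
*/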
1445 6658ffb8 pbrook
1446 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1447 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1448 a1d1bb31 aliguori
                          int flags)
1449 6658ffb8 pbrook
{
1450 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1451 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1452 6658ffb8 pbrook
1453 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1454 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1455 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1456 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1457 6658ffb8 pbrook
            return 0;
1458 6658ffb8 pbrook
        }
1459 6658ffb8 pbrook
    }
1460 a1d1bb31 aliguori
    return -ENOENT;
1461 6658ffb8 pbrook
}
1462 6658ffb8 pbrook
1463 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1464 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1465 a1d1bb31 aliguori
{
1466 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1467 7d03f82f edgar_igl
1468 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1469 a1d1bb31 aliguori
1470 a1d1bb31 aliguori
    qemu_free(watchpoint);
1471 a1d1bb31 aliguori
}
1472 a1d1bb31 aliguori
1473 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1474 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1475 a1d1bb31 aliguori
{
1476 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1477 a1d1bb31 aliguori
1478 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1479 a1d1bb31 aliguori
        if (wp->flags & mask)
1480 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1481 c0ce998e aliguori
    }
1482 7d03f82f edgar_igl
}
1483 c527ee8f Paul Brook
#endif
1484 7d03f82f edgar_igl
1485 a1d1bb31 aliguori
/* Add a breakpoint.  */
1486 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1487 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1488 4c3a88a2 bellard
{
1489 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1490 c0ce998e aliguori
    CPUBreakpoint *bp;
1491 3b46e624 ths
1492 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1493 4c3a88a2 bellard
1494 a1d1bb31 aliguori
    bp->pc = pc;
1495 a1d1bb31 aliguori
    bp->flags = flags;
1496 a1d1bb31 aliguori
1497 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1498 c0ce998e aliguori
    if (flags & BP_GDB)
1499 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1500 c0ce998e aliguori
    else
1501 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1502 3b46e624 ths
1503 d720b93d bellard
    breakpoint_invalidate(env, pc);
1504 a1d1bb31 aliguori
1505 a1d1bb31 aliguori
    if (breakpoint)
1506 a1d1bb31 aliguori
        *breakpoint = bp;
1507 4c3a88a2 bellard
    return 0;
1508 4c3a88a2 bellard
#else
1509 a1d1bb31 aliguori
    return -ENOSYS;
1510 4c3a88a2 bellard
#endif
1511 4c3a88a2 bellard
}
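/* Illustrative sketch only: a debugger front end inserts with BP_GDB and
   later removes by the same pc/flags pair:

       cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
       ...
       cpu_breakpoint_remove(env, pc, BP_GDB);
*/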
1512 4c3a88a2 bellard
1513 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1514 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1515 a1d1bb31 aliguori
{
1516 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1517 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1518 a1d1bb31 aliguori
1519 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1520 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1521 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1522 a1d1bb31 aliguori
            return 0;
1523 a1d1bb31 aliguori
        }
1524 7d03f82f edgar_igl
    }
1525 a1d1bb31 aliguori
    return -ENOENT;
1526 a1d1bb31 aliguori
#else
1527 a1d1bb31 aliguori
    return -ENOSYS;
1528 7d03f82f edgar_igl
#endif
1529 7d03f82f edgar_igl
}
1530 7d03f82f edgar_igl
1531 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1532 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1533 4c3a88a2 bellard
{
1534 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1535 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1536 d720b93d bellard
1537 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1538 a1d1bb31 aliguori
1539 a1d1bb31 aliguori
    qemu_free(breakpoint);
1540 a1d1bb31 aliguori
#endif
1541 a1d1bb31 aliguori
}
1542 a1d1bb31 aliguori
1543 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1544 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1545 a1d1bb31 aliguori
{
1546 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1547 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1548 a1d1bb31 aliguori
1549 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1550 a1d1bb31 aliguori
        if (bp->flags & mask)
1551 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1552 c0ce998e aliguori
    }
1553 4c3a88a2 bellard
#endif
1554 4c3a88a2 bellard
}
1555 4c3a88a2 bellard
1556 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1557 c33a346e bellard
   CPU loop after each instruction */
1558 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1559 c33a346e bellard
{
1560 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1561 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1562 c33a346e bellard
        env->singlestep_enabled = enabled;
1563 e22a25c9 aliguori
        if (kvm_enabled())
1564 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1565 e22a25c9 aliguori
        else {
1566 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1567 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1568 e22a25c9 aliguori
            tb_flush(env);
1569 e22a25c9 aliguori
        }
1570 c33a346e bellard
    }
1571 c33a346e bellard
#endif
1572 c33a346e bellard
}
1573 c33a346e bellard
1574 34865134 bellard
/* enable or disable low-level logging */
1575 34865134 bellard
void cpu_set_log(int log_flags)
1576 34865134 bellard
{
1577 34865134 bellard
    loglevel = log_flags;
1578 34865134 bellard
    if (loglevel && !logfile) {
1579 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1580 34865134 bellard
        if (!logfile) {
1581 34865134 bellard
            perror(logfilename);
1582 34865134 bellard
            _exit(1);
1583 34865134 bellard
        }
1584 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1585 9fa3e853 bellard
        /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1586 9fa3e853 bellard
        {
1587 b55266b5 blueswir1
            static char logfile_buf[4096];
1588 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1589 9fa3e853 bellard
        }
1590 bf65f53f Filip Navara
#elif !defined(_WIN32)
1591 bf65f53f Filip Navara
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1592 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1593 9fa3e853 bellard
#endif
1594 e735b91c pbrook
        log_append = 1;
1595 e735b91c pbrook
    }
1596 e735b91c pbrook
    if (!loglevel && logfile) {
1597 e735b91c pbrook
        fclose(logfile);
1598 e735b91c pbrook
        logfile = NULL;
1599 34865134 bellard
    }
1600 34865134 bellard
}
1601 34865134 bellard
1602 34865134 bellard
void cpu_set_log_filename(const char *filename)
1603 34865134 bellard
{
1604 34865134 bellard
    logfilename = strdup(filename);
1605 e735b91c pbrook
    if (logfile) {
1606 e735b91c pbrook
        fclose(logfile);
1607 e735b91c pbrook
        logfile = NULL;
1608 e735b91c pbrook
    }
1609 e735b91c pbrook
    cpu_set_log(loglevel);
1610 34865134 bellard
}
1611 c33a346e bellard
1612 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1613 ea041c0e bellard
{
1614 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1615 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1616 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds.  Often
1617 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
1618 ea041c0e bellard
    TranslationBlock *tb;
1619 c227f099 Anthony Liguori
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1620 59817ccb bellard
1621 cab1b4bd Riku Voipio
    spin_lock(&interrupt_lock);
1622 3098dba0 aurel32
    tb = env->current_tb;
1623 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1624 3098dba0 aurel32
       all the potentially executing TB */
1625 f76cfe56 Riku Voipio
    if (tb) {
1626 3098dba0 aurel32
        env->current_tb = NULL;
1627 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1628 be214e6c aurel32
    }
1629 cab1b4bd Riku Voipio
    spin_unlock(&interrupt_lock);
1630 3098dba0 aurel32
}
1631 3098dba0 aurel32
1632 97ffbd8d Jan Kiszka
#ifndef CONFIG_USER_ONLY
1633 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1634 ec6959d0 Jan Kiszka
static void tcg_handle_interrupt(CPUState *env, int mask)
1635 3098dba0 aurel32
{
1636 3098dba0 aurel32
    int old_mask;
1637 be214e6c aurel32
1638 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1639 68a79315 bellard
    env->interrupt_request |= mask;
1640 3098dba0 aurel32
1641 8edac960 aliguori
    /*
1642 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1643 8edac960 aliguori
     * case it's halted.
1644 8edac960 aliguori
     */
1645 b7680cb6 Jan Kiszka
    if (!qemu_cpu_is_self(env)) {
1646 8edac960 aliguori
        qemu_cpu_kick(env);
1647 8edac960 aliguori
        return;
1648 8edac960 aliguori
    }
1649 8edac960 aliguori
1650 2e70f6ef pbrook
    if (use_icount) {
1651 266910c4 pbrook
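        /* make the 32-bit instruction-count decrementer negative so the
           currently executing TB exits at its next icount check */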
        env->icount_decr.u16.high = 0xffff;
1652 2e70f6ef pbrook
        if (!can_do_io(env)
1653 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1654 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1655 2e70f6ef pbrook
        }
1656 2e70f6ef pbrook
    } else {
1657 3098dba0 aurel32
        cpu_unlink_tb(env);
1658 ea041c0e bellard
    }
1659 ea041c0e bellard
}
1660 ea041c0e bellard
1661 ec6959d0 Jan Kiszka
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1662 ec6959d0 Jan Kiszka
1663 97ffbd8d Jan Kiszka
#else /* CONFIG_USER_ONLY */
1664 97ffbd8d Jan Kiszka
1665 97ffbd8d Jan Kiszka
void cpu_interrupt(CPUState *env, int mask)
1666 97ffbd8d Jan Kiszka
{
1667 97ffbd8d Jan Kiszka
    env->interrupt_request |= mask;
1668 97ffbd8d Jan Kiszka
    cpu_unlink_tb(env);
1669 97ffbd8d Jan Kiszka
}
1670 97ffbd8d Jan Kiszka
#endif /* CONFIG_USER_ONLY */
1671 97ffbd8d Jan Kiszka
1672 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1673 b54ad049 bellard
{
1674 b54ad049 bellard
    env->interrupt_request &= ~mask;
1675 b54ad049 bellard
}
1676 b54ad049 bellard
1677 3098dba0 aurel32
void cpu_exit(CPUState *env)
1678 3098dba0 aurel32
{
1679 3098dba0 aurel32
    env->exit_request = 1;
1680 3098dba0 aurel32
    cpu_unlink_tb(env);
1681 3098dba0 aurel32
}
1682 3098dba0 aurel32
1683 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1684 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1685 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1686 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1687 f193c797 bellard
      "show target assembly code for each compiled TB" },
1688 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1689 57fec1fe bellard
      "show micro ops for each compiled TB" },
1690 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1691 e01a1157 blueswir1
      "show micro ops "
1692 e01a1157 blueswir1
#ifdef TARGET_I386
1693 e01a1157 blueswir1
      "before eflags optimization and "
1694 f193c797 bellard
#endif
1695 e01a1157 blueswir1
      "after liveness analysis" },
1696 f193c797 bellard
    { CPU_LOG_INT, "int",
1697 f193c797 bellard
      "show interrupts/exceptions in short format" },
1698 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1699 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1700 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1701 e91c8a77 ths
      "show CPU state before block translation" },
1702 f193c797 bellard
#ifdef TARGET_I386
1703 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1704 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1705 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1706 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1707 f193c797 bellard
#endif
1708 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1709 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1710 fd872598 bellard
      "show all i/o ports accesses" },
1711 8e3a9fd2 bellard
#endif
1712 f193c797 bellard
    { 0, NULL, NULL },
1713 f193c797 bellard
};
1714 f193c797 bellard
1715 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1716 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1717 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1718 f6f3fbca Michael S. Tsirkin
1719 f6f3fbca Michael S. Tsirkin
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1720 9742bf26 Yoshiaki Tamura
                                  ram_addr_t size,
1721 0fd542fb Michael S. Tsirkin
                                  ram_addr_t phys_offset,
1722 0fd542fb Michael S. Tsirkin
                                  bool log_dirty)
1723 f6f3fbca Michael S. Tsirkin
{
1724 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1725 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1726 0fd542fb Michael S. Tsirkin
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
1727 f6f3fbca Michael S. Tsirkin
    }
1728 f6f3fbca Michael S. Tsirkin
}
1729 f6f3fbca Michael S. Tsirkin
1730 f6f3fbca Michael S. Tsirkin
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1731 9742bf26 Yoshiaki Tamura
                                        target_phys_addr_t end)
1732 f6f3fbca Michael S. Tsirkin
{
1733 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1734 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1735 f6f3fbca Michael S. Tsirkin
        int r = client->sync_dirty_bitmap(client, start, end);
1736 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1737 f6f3fbca Michael S. Tsirkin
            return r;
1738 f6f3fbca Michael S. Tsirkin
    }
1739 f6f3fbca Michael S. Tsirkin
    return 0;
1740 f6f3fbca Michael S. Tsirkin
}
1741 f6f3fbca Michael S. Tsirkin
1742 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1743 f6f3fbca Michael S. Tsirkin
{
1744 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1745 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1746 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1747 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1748 f6f3fbca Michael S. Tsirkin
            return r;
1749 f6f3fbca Michael S. Tsirkin
    }
1750 f6f3fbca Michael S. Tsirkin
    return 0;
1751 f6f3fbca Michael S. Tsirkin
}
1752 f6f3fbca Michael S. Tsirkin
1753 2173a75f Alex Williamson
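/* Accumulates a run of pages contiguous in both guest physical address
   and phys_offset, so registered clients receive one set_memory()
   callback per region rather than one per page. */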
struct last_map {
1754 2173a75f Alex Williamson
    target_phys_addr_t start_addr;
1755 2173a75f Alex Williamson
    ram_addr_t size;
1756 2173a75f Alex Williamson
    ram_addr_t phys_offset;
1757 2173a75f Alex Williamson
};
1758 2173a75f Alex Williamson
1759 8d4c78e7 Alex Williamson
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1760 8d4c78e7 Alex Williamson
 * address.  Each intermediate table provides the next L2_BITs of guest
1761 8d4c78e7 Alex Williamson
 * physical address space.  The number of levels vary based on host and
1762 8d4c78e7 Alex Williamson
 * guest configuration, making it efficient to build the final guest
1763 8d4c78e7 Alex Williamson
 * physical address by seeding the L1 offset and shifting and adding in
1764 8d4c78e7 Alex Williamson
 * each L2 offset as we recurse through them. */
1765 2173a75f Alex Williamson
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1766 2173a75f Alex Williamson
                                 void **lp, target_phys_addr_t addr,
1767 2173a75f Alex Williamson
                                 struct last_map *map)
1768 f6f3fbca Michael S. Tsirkin
{
1769 5cd2c5b6 Richard Henderson
    int i;
1770 f6f3fbca Michael S. Tsirkin
1771 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
1772 5cd2c5b6 Richard Henderson
        return;
1773 5cd2c5b6 Richard Henderson
    }
1774 5cd2c5b6 Richard Henderson
    if (level == 0) {
1775 5cd2c5b6 Richard Henderson
        PhysPageDesc *pd = *lp;
1776 8d4c78e7 Alex Williamson
        addr <<= L2_BITS + TARGET_PAGE_BITS;
1777 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
1778 5cd2c5b6 Richard Henderson
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1779 2173a75f Alex Williamson
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1780 2173a75f Alex Williamson
1781 2173a75f Alex Williamson
                if (map->size &&
1782 2173a75f Alex Williamson
                    start_addr == map->start_addr + map->size &&
1783 2173a75f Alex Williamson
                    pd[i].phys_offset == map->phys_offset + map->size) {
1784 2173a75f Alex Williamson
1785 2173a75f Alex Williamson
                    map->size += TARGET_PAGE_SIZE;
1786 2173a75f Alex Williamson
                    continue;
1787 2173a75f Alex Williamson
                } else if (map->size) {
1788 2173a75f Alex Williamson
                    client->set_memory(client, map->start_addr,
1789 2173a75f Alex Williamson
                                       map->size, map->phys_offset, false);
1790 2173a75f Alex Williamson
                }
1791 2173a75f Alex Williamson
1792 2173a75f Alex Williamson
                map->start_addr = start_addr;
1793 2173a75f Alex Williamson
                map->size = TARGET_PAGE_SIZE;
1794 2173a75f Alex Williamson
                map->phys_offset = pd[i].phys_offset;
1795 f6f3fbca Michael S. Tsirkin
            }
1796 5cd2c5b6 Richard Henderson
        }
1797 5cd2c5b6 Richard Henderson
    } else {
1798 5cd2c5b6 Richard Henderson
        void **pp = *lp;
1799 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
1800 8d4c78e7 Alex Williamson
            phys_page_for_each_1(client, level - 1, pp + i,
1801 2173a75f Alex Williamson
                                 (addr << L2_BITS) | i, map);
1802 f6f3fbca Michael S. Tsirkin
        }
1803 f6f3fbca Michael S. Tsirkin
    }
1804 f6f3fbca Michael S. Tsirkin
}
1805 f6f3fbca Michael S. Tsirkin
1806 f6f3fbca Michael S. Tsirkin
static void phys_page_for_each(CPUPhysMemoryClient *client)
1807 f6f3fbca Michael S. Tsirkin
{
1808 5cd2c5b6 Richard Henderson
    int i;
1809 2173a75f Alex Williamson
    struct last_map map = { };
1810 2173a75f Alex Williamson
1811 5cd2c5b6 Richard Henderson
    for (i = 0; i < P_L1_SIZE; ++i) {
1812 5cd2c5b6 Richard Henderson
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1813 2173a75f Alex Williamson
                             l1_phys_map + i, i, &map);
1814 2173a75f Alex Williamson
    }
1815 2173a75f Alex Williamson
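    /* flush the final coalesced region, if any */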
    if (map.size) {
1816 2173a75f Alex Williamson
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1817 2173a75f Alex Williamson
                           false);
1818 f6f3fbca Michael S. Tsirkin
    }
1819 f6f3fbca Michael S. Tsirkin
}
1820 f6f3fbca Michael S. Tsirkin
1821 f6f3fbca Michael S. Tsirkin
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1822 f6f3fbca Michael S. Tsirkin
{
1823 f6f3fbca Michael S. Tsirkin
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
1824 f6f3fbca Michael S. Tsirkin
    phys_page_for_each(client);
1825 f6f3fbca Michael S. Tsirkin
}
1826 f6f3fbca Michael S. Tsirkin
1827 f6f3fbca Michael S. Tsirkin
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1828 f6f3fbca Michael S. Tsirkin
{
1829 f6f3fbca Michael S. Tsirkin
    QLIST_REMOVE(client, list);
1830 f6f3fbca Michael S. Tsirkin
}
1831 f6f3fbca Michael S. Tsirkin
#endif
1832 f6f3fbca Michael S. Tsirkin
1833 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1834 f193c797 bellard
{
1835 f193c797 bellard
    if (strlen(s2) != n)
1836 f193c797 bellard
        return 0;
1837 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1838 f193c797 bellard
}
1839 3b46e624 ths
1840 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
1841 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1842 f193c797 bellard
{
1843 c7cd6a37 blueswir1
    const CPULogItem *item;
1844 f193c797 bellard
    int mask;
1845 f193c797 bellard
    const char *p, *p1;
1846 f193c797 bellard
1847 f193c797 bellard
    p = str;
1848 f193c797 bellard
    mask = 0;
1849 f193c797 bellard
    for(;;) {
1850 f193c797 bellard
        p1 = strchr(p, ',');
1851 f193c797 bellard
        if (!p1)
1852 f193c797 bellard
            p1 = p + strlen(p);
1853 9742bf26 Yoshiaki Tamura
        if(cmp1(p,p1-p,"all")) {
1854 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1855 9742bf26 Yoshiaki Tamura
                mask |= item->mask;
1856 9742bf26 Yoshiaki Tamura
            }
1857 9742bf26 Yoshiaki Tamura
        } else {
1858 9742bf26 Yoshiaki Tamura
            for(item = cpu_log_items; item->mask != 0; item++) {
1859 9742bf26 Yoshiaki Tamura
                if (cmp1(p, p1 - p, item->name))
1860 9742bf26 Yoshiaki Tamura
                    goto found;
1861 9742bf26 Yoshiaki Tamura
            }
1862 9742bf26 Yoshiaki Tamura
            return 0;
1863 f193c797 bellard
        }
1864 f193c797 bellard
    found:
1865 f193c797 bellard
        mask |= item->mask;
1866 f193c797 bellard
        if (*p1 != ',')
1867 f193c797 bellard
            break;
1868 f193c797 bellard
        p = p1 + 1;
1869 f193c797 bellard
    }
1870 f193c797 bellard
    return mask;
1871 f193c797 bellard
}
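/* e.g. cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, while any unknown name yields 0 */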
1872 ea041c0e bellard
1873 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1874 7501267e bellard
{
1875 7501267e bellard
    va_list ap;
1876 493ae1f0 pbrook
    va_list ap2;
1877 7501267e bellard
1878 7501267e bellard
    va_start(ap, fmt);
1879 493ae1f0 pbrook
    va_copy(ap2, ap);
1880 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1881 7501267e bellard
    vfprintf(stderr, fmt, ap);
1882 7501267e bellard
    fprintf(stderr, "\n");
1883 7501267e bellard
#ifdef TARGET_I386
1884 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1885 7fe48483 bellard
#else
1886 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1887 7501267e bellard
#endif
1888 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1889 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1890 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1891 93fcfe39 aliguori
        qemu_log("\n");
1892 f9373291 j_mayer
#ifdef TARGET_I386
1893 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1894 f9373291 j_mayer
#else
1895 93fcfe39 aliguori
        log_cpu_state(env, 0);
1896 f9373291 j_mayer
#endif
1897 31b1a7b4 aliguori
        qemu_log_flush();
1898 93fcfe39 aliguori
        qemu_log_close();
1899 924edcae balrog
    }
1900 493ae1f0 pbrook
    va_end(ap2);
1901 f9373291 j_mayer
    va_end(ap);
1902 fd052bf6 Riku Voipio
#if defined(CONFIG_USER_ONLY)
1903 fd052bf6 Riku Voipio
    {
1904 fd052bf6 Riku Voipio
        struct sigaction act;
1905 fd052bf6 Riku Voipio
        sigfillset(&act.sa_mask);
1906 fd052bf6 Riku Voipio
        act.sa_handler = SIG_DFL;
1907 fd052bf6 Riku Voipio
        sigaction(SIGABRT, &act, NULL);
1908 fd052bf6 Riku Voipio
    }
1909 fd052bf6 Riku Voipio
#endif
1910 7501267e bellard
    abort();
1911 7501267e bellard
}
1912 7501267e bellard
1913 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1914 c5be9f08 ths
{
1915 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1916 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1917 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1918 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1919 5a38f081 aliguori
    CPUBreakpoint *bp;
1920 5a38f081 aliguori
    CPUWatchpoint *wp;
1921 5a38f081 aliguori
#endif
1922 5a38f081 aliguori
1923 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1924 5a38f081 aliguori
1925 5a38f081 aliguori
    /* Preserve chaining and index. */
1926 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1927 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1928 5a38f081 aliguori
1929 5a38f081 aliguori
    /* Clone all break/watchpoints.
1930 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1931 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1932 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->breakpoints);
1933 72cf2d4f Blue Swirl
    QTAILQ_INIT(&env->watchpoints);
1934 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1935 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1936 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1937 5a38f081 aliguori
    }
1938 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1939 5a38f081 aliguori
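        /* len_mask is ~(len - 1), so ~len_mask + 1 recovers the length */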
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1940 5a38f081 aliguori
                              wp->flags, NULL);
1941 5a38f081 aliguori
    }
1942 5a38f081 aliguori
#endif
1943 5a38f081 aliguori
1944 c5be9f08 ths
    return new_env;
1945 c5be9f08 ths
}
1946 c5be9f08 ths
1947 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1948 0124311e bellard
1949 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1950 5c751e99 edgar_igl
{
1951 5c751e99 edgar_igl
    unsigned int i;
1952 5c751e99 edgar_igl
1953 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1954 5c751e99 edgar_igl
       overlap the flushed page.  */
1955 5c751e99 edgar_igl
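    /* a TB may span two pages, so the bucket hashing the preceding page
       must be cleared as well */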
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1956 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1957 9742bf26 Yoshiaki Tamura
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1958 5c751e99 edgar_igl
1959 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1960 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1961 9742bf26 Yoshiaki Tamura
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1962 5c751e99 edgar_igl
}
1963 5c751e99 edgar_igl
1964 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1965 08738984 Igor Kovalenko
    .addr_read  = -1,
1966 08738984 Igor Kovalenko
    .addr_write = -1,
1967 08738984 Igor Kovalenko
    .addr_code  = -1,
1968 08738984 Igor Kovalenko
    .addend     = -1,
1969 08738984 Igor Kovalenko
};
1970 08738984 Igor Kovalenko
1971 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1972 ee8b7021 bellard
   implemented yet) */
1973 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1974 33417e70 bellard
{
1975 33417e70 bellard
    int i;
1976 0124311e bellard
1977 9fa3e853 bellard
#if defined(DEBUG_TLB)
1978 9fa3e853 bellard
    printf("tlb_flush:\n");
1979 9fa3e853 bellard
#endif
1980 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1981 0124311e bellard
       links while we are modifying them */
1982 0124311e bellard
    env->current_tb = NULL;
1983 0124311e bellard
1984 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1985 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1986 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1987 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1988 cfde4bd9 Isaku Yamahata
        }
1989 33417e70 bellard
    }
1990 9fa3e853 bellard
1991 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1992 9fa3e853 bellard
1993 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1994 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1995 e3db7226 bellard
    tlb_flush_count++;
1996 33417e70 bellard
}
1997 33417e70 bellard
1998 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1999 61382a50 bellard
{
2000 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
2001 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2002 5fafdf24 ths
        addr == (tlb_entry->addr_write &
2003 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2004 5fafdf24 ths
        addr == (tlb_entry->addr_code &
2005 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2006 08738984 Igor Kovalenko
        *tlb_entry = s_cputlb_empty_entry;
2007 84b7b8e7 bellard
    }
2008 61382a50 bellard
}
2009 61382a50 bellard
2010 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2011 33417e70 bellard
{
2012 8a40a180 bellard
    int i;
2013 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2014 0124311e bellard
2015 9fa3e853 bellard
#if defined(DEBUG_TLB)
2016 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2017 9fa3e853 bellard
#endif
2018 d4c430a8 Paul Brook
    /* Check if we need to flush due to large pages.  */
2019 d4c430a8 Paul Brook
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2020 d4c430a8 Paul Brook
#if defined(DEBUG_TLB)
2021 d4c430a8 Paul Brook
        printf("tlb_flush_page: forced full flush ("
2022 d4c430a8 Paul Brook
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2023 d4c430a8 Paul Brook
               env->tlb_flush_addr, env->tlb_flush_mask);
2024 d4c430a8 Paul Brook
#endif
2025 d4c430a8 Paul Brook
        tlb_flush(env, 1);
2026 d4c430a8 Paul Brook
        return;
2027 d4c430a8 Paul Brook
    }
2028 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
2029 0124311e bellard
       links while we are modifying them */
2030 0124311e bellard
    env->current_tb = NULL;
2031 61382a50 bellard
2032 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
2033 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2034 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2035 cfde4bd9 Isaku Yamahata
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2036 0124311e bellard
2037 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
2038 9fa3e853 bellard
}
2039 9fa3e853 bellard
2040 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
2041 9fa3e853 bellard
   can be detected */
2042 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
2043 9fa3e853 bellard
{
2044 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
2045 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
2046 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
2047 9fa3e853 bellard
}
2048 9fa3e853 bellard
2049 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
2050 3a7d929e bellard
   tested for self modifying code */
2051 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2052 3a7d929e bellard
                                    target_ulong vaddr)
2053 9fa3e853 bellard
{
2054 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2055 1ccde1cb bellard
}
2056 1ccde1cb bellard
2057 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2058 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
2059 1ccde1cb bellard
{
2060 1ccde1cb bellard
    unsigned long addr;
2061 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2062 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2063 1ccde1cb bellard
        if ((addr - start) < length) {
2064 0f459d16 pbrook
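            /* TLB_NOTDIRTY forces the next write through the slow path,
               where the dirty bitmap is brought up to date */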
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2065 1ccde1cb bellard
        }
2066 1ccde1cb bellard
    }
2067 1ccde1cb bellard
}
2068 1ccde1cb bellard
2069 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
2070 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2071 0a962c02 bellard
                                     int dirty_flags)
2072 1ccde1cb bellard
{
2073 1ccde1cb bellard
    CPUState *env;
2074 4f2ac237 bellard
    unsigned long length, start1;
2075 f7c11b53 Yoshiaki Tamura
    int i;
2076 1ccde1cb bellard
2077 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
2078 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
2079 1ccde1cb bellard
2080 1ccde1cb bellard
    length = end - start;
2081 1ccde1cb bellard
    if (length == 0)
2082 1ccde1cb bellard
        return;
2083 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2084 f23db169 bellard
2085 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
2086 1ccde1cb bellard
       when accessing the range */
2087 b2e0a138 Michael S. Tsirkin
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
2088 5579c7f3 pbrook
    /* Check that we don't span multiple blocks - this breaks the
2089 5579c7f3 pbrook
       address comparisons below.  */
2090 b2e0a138 Michael S. Tsirkin
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2091 5579c7f3 pbrook
            != (end - 1) - start) {
2092 5579c7f3 pbrook
        abort();
2093 5579c7f3 pbrook
    }
2094 5579c7f3 pbrook
2095 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2096 cfde4bd9 Isaku Yamahata
        int mmu_idx;
2097 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2098 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
2099 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2100 cfde4bd9 Isaku Yamahata
                                      start1, length);
2101 cfde4bd9 Isaku Yamahata
        }
2102 6a00d601 bellard
    }
2103 1ccde1cb bellard
}
2104 1ccde1cb bellard
2105 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2106 74576198 aliguori
{
2107 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2108 74576198 aliguori
    in_migration = enable;
2109 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2110 f6f3fbca Michael S. Tsirkin
    return ret;
2111 74576198 aliguori
}
2112 74576198 aliguori
2113 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
2114 74576198 aliguori
{
2115 74576198 aliguori
    return in_migration;
2116 74576198 aliguori
}
2117 74576198 aliguori
2118 c227f099 Anthony Liguori
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2119 c227f099 Anthony Liguori
                                   target_phys_addr_t end_addr)
2120 2bec46dc aliguori
{
2121 7b8f3b78 Michael S. Tsirkin
    int ret;
2122 151f7749 Jan Kiszka
2123 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2124 151f7749 Jan Kiszka
    return ret;
2125 2bec46dc aliguori
}
2126 2bec46dc aliguori
2127 e5896b12 Anthony PERARD
int cpu_physical_log_start(target_phys_addr_t start_addr,
2128 e5896b12 Anthony PERARD
                           ram_addr_t size)
2129 e5896b12 Anthony PERARD
{
2130 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2131 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2132 e5896b12 Anthony PERARD
        if (client->log_start) {
2133 e5896b12 Anthony PERARD
            int r = client->log_start(client, start_addr, size);
2134 e5896b12 Anthony PERARD
            if (r < 0) {
2135 e5896b12 Anthony PERARD
                return r;
2136 e5896b12 Anthony PERARD
            }
2137 e5896b12 Anthony PERARD
        }
2138 e5896b12 Anthony PERARD
    }
2139 e5896b12 Anthony PERARD
    return 0;
2140 e5896b12 Anthony PERARD
}
2141 e5896b12 Anthony PERARD
2142 e5896b12 Anthony PERARD
int cpu_physical_log_stop(target_phys_addr_t start_addr,
2143 e5896b12 Anthony PERARD
                          ram_addr_t size)
2144 e5896b12 Anthony PERARD
{
2145 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2146 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2147 e5896b12 Anthony PERARD
        if (client->log_stop) {
2148 e5896b12 Anthony PERARD
            int r = client->log_stop(client, start_addr, size);
2149 e5896b12 Anthony PERARD
            if (r < 0) {
2150 e5896b12 Anthony PERARD
                return r;
2151 e5896b12 Anthony PERARD
            }
2152 e5896b12 Anthony PERARD
        }
2153 e5896b12 Anthony PERARD
    }
2154 e5896b12 Anthony PERARD
    return 0;
2155 e5896b12 Anthony PERARD
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
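
/* Worked example (illustrative, not compiled): how the tracked region
   widens when a second large page is added.  Assumes 2MB pages; the
   locals mirror env->tlb_flush_addr and env->tlb_flush_mask above. */
#if 0
#include <stdio.h>

int main(void)
{
    unsigned long flush_addr = 0x00200000;  /* first 2MB page */
    unsigned long mask = ~0x1fffffUL;       /* ~(2MB - 1) */
    unsigned long vaddr = 0x00a00000;       /* second 2MB page */

    /* Same loop as tlb_add_large_page: widen the mask until both
       addresses fall inside one aligned region. */
    while (((flush_addr ^ vaddr) & mask) != 0)
        mask <<= 1;
    flush_addr &= mask;

    /* On an LP64 host: addr=0 mask=0xffffffffff000000, i.e. one 16MB
       region now covers both pages, and invalidating any address inside
       it will force a full TLB flush. */
    printf("addr=%#lx mask=%#lx\n", flush_addr, mask);
    return 0;
}
#endif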

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
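
/* Worked example (illustrative): with TARGET_PAGE_BITS == 12 and
   CPU_TLB_SIZE == 256, an access to vaddr 0x40003000 selects
   index = (0x40003000 >> 12) & 255 = 3.  For a dirty RAM page the entry
   gets addr_write == 0x40003000 and an addend such that
   host_address = vaddr + addend; for a clean RAM page TLB_NOTDIRTY is
   set instead, so the first write is routed through the notdirty
   handlers further below and marks the page dirty before the fast path
   is restored. */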

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
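
/* Usage sketch (illustrative): roughly how the user-mode mmap emulation
   publishes a fresh mapping.  The real caller lives in linux-user/mmap.c;
   treat the fragment below as an assumption about it, not a copy. */
#if 0
static void example_publish_mapping(target_ulong start, target_ulong len,
                                    int prot)
{
    /* after a successful host mmap of [start, start + len): */
    page_set_flags(start, start + len, prot | PAGE_VALID);
}
#endif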

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
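
/* Usage sketch (illustrative): validating a guest buffer before touching
   it, in the style of the user-mode access_ok() check.  The wrapper name
   is invented for the example. */
#if 0
static int example_access_ok(target_ulong addr, target_ulong size,
                             int is_write)
{
    return page_check_range(addr, size,
                            is_write ? PAGE_WRITE : PAGE_READ) == 0;
}
#endif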

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
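
/* Worked example (illustrative): registering orig_size == 0x1800 bytes at
   start_addr == 0x1000800 with 4KB target pages.
   - Page at addr 0x1000000: start_addr2 = 0x800, end_addr2 = 0xfff and
     need_subpage becomes 1 (only the top half of the page is covered).
   - Page at addr 0x1001000: start_addr2 = 0, end_addr2 = 0xfff and
     need_subpage stays 0 (the whole page is covered). */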

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset,
                                         bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
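
/* Usage sketch (illustrative): how board code typically maps
   guest-physical ranges.  cpu_register_physical_memory() is assumed to
   be the usual thin wrapper around the _log variant (region_offset = 0,
   log_dirty = false); the values are invented for the example. */
#if 0
static void example_map_board_memory(ram_addr_t ram_offset, int io_index)
{
    /* 128MB of RAM at guest-physical address 0: */
    cpu_register_physical_memory(0x00000000, 0x08000000,
                                 ram_offset | IO_MEM_RAM);
    /* one MMIO page; io_index as returned by cpu_register_io_memory(): */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io_index);
}
#endif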

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}
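
/* Worked example (illustrative, not compiled): the best-fit gap search
   above, run on a toy block list held in plain arrays. */
#if 0
#include <stdio.h>

int main(void)
{
    unsigned long off[] = { 0x000000, 0x300000 };  /* block offsets */
    unsigned long len[] = { 0x100000, 0x100000 };  /* block lengths */
    unsigned long size = 0x80000;
    unsigned long offset = 0, mingap = (unsigned long)-1;
    int i, j;

    for (i = 0; i < 2; i++) {
        unsigned long end = off[i] + len[i];
        unsigned long next = (unsigned long)-1;
        for (j = 0; j < 2; j++) {
            if (off[j] >= end && off[j] < next)
                next = off[j];
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    /* Prints 0x100000: the 2MB hole between the blocks is the smallest
       gap that still fits the request. */
    printf("place new block at %#lx\n", offset);
    return 0;
}
#endif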

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
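
/* Usage sketch (illustrative): a device model allocating backing RAM and
   mapping it for the guest.  Names, addresses and sizes are invented for
   the example. */
#if 0
static void example_init_vram(DeviceState *dev)
{
    ram_addr_t vram_offset = qemu_ram_alloc(dev, "example.vram", 0x00800000);
    cpu_register_physical_memory(0xe0000000, 0x00800000,
                                 vram_offset | IO_MEM_RAM);
}
#endif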

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }
    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
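
/* Illustrative invariant (not compiled): for any offset inside a
   registered block, mapping to a host pointer and back is the identity. */
#if 0
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}
#endif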

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3211 33417e70 bellard
3212 c227f099 Anthony Liguori
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3213 0f459d16 pbrook
                                uint32_t val)
3214 9fa3e853 bellard
{
3215 3a7d929e bellard
    int dirty_flags;
3216 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3217 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3218 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3219 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 1);
3220 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3221 9fa3e853 bellard
#endif
3222 3a7d929e bellard
    }
3223 5579c7f3 pbrook
    stb_p(qemu_get_ram_ptr(ram_addr), val);
3224 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3225 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3226 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3227 f23db169 bellard
       flushed */
3228 f23db169 bellard
    if (dirty_flags == 0xff)
3229 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3230 9fa3e853 bellard
}
3231 9fa3e853 bellard
3232 c227f099 Anthony Liguori
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3233 0f459d16 pbrook
                                uint32_t val)
3234 9fa3e853 bellard
{
3235 3a7d929e bellard
    int dirty_flags;
3236 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3237 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3238 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3239 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 2);
3240 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3241 9fa3e853 bellard
#endif
3242 3a7d929e bellard
    }
3243 5579c7f3 pbrook
    stw_p(qemu_get_ram_ptr(ram_addr), val);
3244 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3245 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3246 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3247 f23db169 bellard
       flushed */
3248 f23db169 bellard
    if (dirty_flags == 0xff)
3249 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3250 9fa3e853 bellard
}
3251 9fa3e853 bellard
3252 c227f099 Anthony Liguori
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3253 0f459d16 pbrook
                                uint32_t val)
3254 9fa3e853 bellard
{
3255 3a7d929e bellard
    int dirty_flags;
3256 f7c11b53 Yoshiaki Tamura
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3257 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3258 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
3259 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
3260 f7c11b53 Yoshiaki Tamura
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3261 9fa3e853 bellard
#endif
3262 3a7d929e bellard
    }
3263 5579c7f3 pbrook
    stl_p(qemu_get_ram_ptr(ram_addr), val);
3264 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3265 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3266 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
3267 f23db169 bellard
       flushed */
3268 f23db169 bellard
    if (dirty_flags == 0xff)
3269 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3270 9fa3e853 bellard
}
3271 9fa3e853 bellard
3272 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const error_mem_read[3] = {
3273 9fa3e853 bellard
    NULL, /* never used */
3274 9fa3e853 bellard
    NULL, /* never used */
3275 9fa3e853 bellard
    NULL, /* never used */
3276 9fa3e853 bellard
};
3277 9fa3e853 bellard
3278 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3279 1ccde1cb bellard
    notdirty_mem_writeb,
3280 1ccde1cb bellard
    notdirty_mem_writew,
3281 1ccde1cb bellard
    notdirty_mem_writel,
3282 1ccde1cb bellard
};
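/* Editor's note (not part of the original file): these handlers back
 * IO_MEM_NOTDIRTY, which clean RAM pages' TLB entries point at. Each
 * write first invalidates any translated code still on the page, then
 * performs the store and sets the dirty bits; once dirty_flags reaches
 * 0xff the TLB entry is switched back to plain RAM so later writes take
 * the fast path.
 */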
3283 1ccde1cb bellard
3284 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
3285 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
3286 0f459d16 pbrook
{
3287 0f459d16 pbrook
    CPUState *env = cpu_single_env;
3288 06d55cc1 aliguori
    target_ulong pc, cs_base;
3289 06d55cc1 aliguori
    TranslationBlock *tb;
3290 0f459d16 pbrook
    target_ulong vaddr;
3291 a1d1bb31 aliguori
    CPUWatchpoint *wp;
3292 06d55cc1 aliguori
    int cpu_flags;
3293 0f459d16 pbrook
3294 06d55cc1 aliguori
    if (env->watchpoint_hit) {
3295 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
3296 06d55cc1 aliguori
         * the debug interrupt so that it will trigger after the
3297 06d55cc1 aliguori
         * current instruction. */
3298 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3299 06d55cc1 aliguori
        return;
3300 06d55cc1 aliguori
    }
3301 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3302 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3303 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
3304 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3305 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
3306 6e140f28 aliguori
            if (!env->watchpoint_hit) {
3307 6e140f28 aliguori
                env->watchpoint_hit = wp;
3308 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
3309 6e140f28 aliguori
                if (!tb) {
3310 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
3311 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
3312 6e140f28 aliguori
                }
3313 618ba8e6 Stefan Weil
                cpu_restore_state(tb, env, env->mem_io_pc);
3314 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
3315 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3316 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
3317 6e140f28 aliguori
                } else {
3318 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3319 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3320 6e140f28 aliguori
                }
3321 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
3322 06d55cc1 aliguori
            }
3323 6e140f28 aliguori
        } else {
3324 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
3325 0f459d16 pbrook
        }
3326 0f459d16 pbrook
    }
3327 0f459d16 pbrook
}
3328 0f459d16 pbrook
3329 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
3330 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
3331 6658ffb8 pbrook
   phys routines.  */
3332 c227f099 Anthony Liguori
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3333 6658ffb8 pbrook
{
3334 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3335 6658ffb8 pbrook
    return ldub_phys(addr);
3336 6658ffb8 pbrook
}
3337 6658ffb8 pbrook
3338 c227f099 Anthony Liguori
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3339 6658ffb8 pbrook
{
3340 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3341 6658ffb8 pbrook
    return lduw_phys(addr);
3342 6658ffb8 pbrook
}
3343 6658ffb8 pbrook
3344 c227f099 Anthony Liguori
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3345 6658ffb8 pbrook
{
3346 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3347 6658ffb8 pbrook
    return ldl_phys(addr);
3348 6658ffb8 pbrook
}
3349 6658ffb8 pbrook
3350 c227f099 Anthony Liguori
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3351 6658ffb8 pbrook
                             uint32_t val)
3352 6658ffb8 pbrook
{
3353 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3354 6658ffb8 pbrook
    stb_phys(addr, val);
3355 6658ffb8 pbrook
}
3356 6658ffb8 pbrook
3357 c227f099 Anthony Liguori
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3358 6658ffb8 pbrook
                             uint32_t val)
3359 6658ffb8 pbrook
{
3360 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3361 6658ffb8 pbrook
    stw_phys(addr, val);
3362 6658ffb8 pbrook
}
3363 6658ffb8 pbrook
3364 c227f099 Anthony Liguori
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3365 6658ffb8 pbrook
                             uint32_t val)
3366 6658ffb8 pbrook
{
3367 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3368 6658ffb8 pbrook
    stl_phys(addr, val);
3369 6658ffb8 pbrook
}
3370 6658ffb8 pbrook
3371 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const watch_mem_read[3] = {
3372 6658ffb8 pbrook
    watch_mem_readb,
3373 6658ffb8 pbrook
    watch_mem_readw,
3374 6658ffb8 pbrook
    watch_mem_readl,
3375 6658ffb8 pbrook
};
3376 6658ffb8 pbrook
3377 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3378 6658ffb8 pbrook
    watch_mem_writeb,
3379 6658ffb8 pbrook
    watch_mem_writew,
3380 6658ffb8 pbrook
    watch_mem_writel,
3381 6658ffb8 pbrook
};
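/* Editor's sketch (illustrative, not part of the original file): how an
 * access ends up in the watch_mem_* handlers above. Debug code inserts a
 * watchpoint with cpu_watchpoint_insert() (defined earlier in this file);
 * the TLB then forces accesses to that page through io_mem_watch, which
 * runs check_watchpoint() before completing the access. "env" and "vaddr"
 * are assumptions.
 */
#if 0
CPUWatchpoint *wp;
/* trap 4-byte guest writes to vaddr; EXCP_DEBUG is raised on a hit */
cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
#endif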
3382 6658ffb8 pbrook
3383 f6405247 Richard Henderson
static inline uint32_t subpage_readlen (subpage_t *mmio,
3384 f6405247 Richard Henderson
                                        target_phys_addr_t addr,
3385 f6405247 Richard Henderson
                                        unsigned int len)
3386 db7b5426 blueswir1
{
3387 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3388 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3389 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3390 db7b5426 blueswir1
           mmio, len, addr, idx);
3391 db7b5426 blueswir1
#endif
3392 db7b5426 blueswir1
3393 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3394 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3395 f6405247 Richard Henderson
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3396 db7b5426 blueswir1
}
3397 db7b5426 blueswir1
3398 c227f099 Anthony Liguori
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3399 f6405247 Richard Henderson
                                     uint32_t value, unsigned int len)
3400 db7b5426 blueswir1
{
3401 f6405247 Richard Henderson
    unsigned int idx = SUBPAGE_IDX(addr);
3402 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3403 f6405247 Richard Henderson
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3404 f6405247 Richard Henderson
           __func__, mmio, len, addr, idx, value);
3405 db7b5426 blueswir1
#endif
3406 f6405247 Richard Henderson
3407 f6405247 Richard Henderson
    addr += mmio->region_offset[idx];
3408 f6405247 Richard Henderson
    idx = mmio->sub_io_index[idx];
3409 f6405247 Richard Henderson
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3410 db7b5426 blueswir1
}
3411 db7b5426 blueswir1
3412 c227f099 Anthony Liguori
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3413 db7b5426 blueswir1
{
3414 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 0);
3415 db7b5426 blueswir1
}
3416 db7b5426 blueswir1
3417 c227f099 Anthony Liguori
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3418 db7b5426 blueswir1
                            uint32_t value)
3419 db7b5426 blueswir1
{
3420 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
3421 db7b5426 blueswir1
}
3422 db7b5426 blueswir1
3423 c227f099 Anthony Liguori
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3424 db7b5426 blueswir1
{
3425 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 1);
3426 db7b5426 blueswir1
}
3427 db7b5426 blueswir1
3428 c227f099 Anthony Liguori
static void subpage_writew (void *opaque, target_phys_addr_t addr,
3429 db7b5426 blueswir1
                            uint32_t value)
3430 db7b5426 blueswir1
{
3431 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
3432 db7b5426 blueswir1
}
3433 db7b5426 blueswir1
3434 c227f099 Anthony Liguori
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3435 db7b5426 blueswir1
{
3436 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 2);
3437 db7b5426 blueswir1
}
3438 db7b5426 blueswir1
3439 f6405247 Richard Henderson
static void subpage_writel (void *opaque, target_phys_addr_t addr,
3440 f6405247 Richard Henderson
                            uint32_t value)
3441 db7b5426 blueswir1
{
3442 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
3443 db7b5426 blueswir1
}
3444 db7b5426 blueswir1
3445 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const subpage_read[] = {
3446 db7b5426 blueswir1
    &subpage_readb,
3447 db7b5426 blueswir1
    &subpage_readw,
3448 db7b5426 blueswir1
    &subpage_readl,
3449 db7b5426 blueswir1
};
3450 db7b5426 blueswir1
3451 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const subpage_write[] = {
3452 db7b5426 blueswir1
    &subpage_writeb,
3453 db7b5426 blueswir1
    &subpage_writew,
3454 db7b5426 blueswir1
    &subpage_writel,
3455 db7b5426 blueswir1
};
3456 db7b5426 blueswir1
3457 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3458 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset)
3459 db7b5426 blueswir1
{
3460 db7b5426 blueswir1
    int idx, eidx;
3461 db7b5426 blueswir1
3462 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3463 db7b5426 blueswir1
        return -1;
3464 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
3465 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
3466 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3467 0bf9e31a Blue Swirl
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3468 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
3469 db7b5426 blueswir1
#endif
3470 95c318f5 Gleb Natapov
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3471 95c318f5 Gleb Natapov
        memory = IO_MEM_UNASSIGNED;
3472 f6405247 Richard Henderson
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3473 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
3474 f6405247 Richard Henderson
        mmio->sub_io_index[idx] = memory;
3475 f6405247 Richard Henderson
        mmio->region_offset[idx] = region_offset;
3476 db7b5426 blueswir1
    }
3477 db7b5426 blueswir1
3478 db7b5426 blueswir1
    return 0;
3479 db7b5426 blueswir1
}
3480 db7b5426 blueswir1
3481 f6405247 Richard Henderson
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3482 f6405247 Richard Henderson
                                ram_addr_t orig_memory,
3483 f6405247 Richard Henderson
                                ram_addr_t region_offset)
3484 db7b5426 blueswir1
{
3485 c227f099 Anthony Liguori
    subpage_t *mmio;
3486 db7b5426 blueswir1
    int subpage_memory;
3487 db7b5426 blueswir1
3488 c227f099 Anthony Liguori
    mmio = qemu_mallocz(sizeof(subpage_t));
3489 1eec614b aliguori
3490 1eec614b aliguori
    mmio->base = base;
3491 2507c12a Alexander Graf
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3492 2507c12a Alexander Graf
                                            DEVICE_NATIVE_ENDIAN);
3493 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3494 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3495 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3496 db7b5426 blueswir1
#endif
3497 1eec614b aliguori
    *phys = subpage_memory | IO_MEM_SUBPAGE;
3498 f6405247 Richard Henderson
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3499 db7b5426 blueswir1
3500 db7b5426 blueswir1
    return mmio;
3501 db7b5426 blueswir1
}
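/* Editor's note (not part of the original file): subpages exist so that a
 * single guest page can be split between several I/O handlers. The
 * physical-memory registration path creates one whenever a registered
 * range does not cover a whole TARGET_PAGE_SIZE page, roughly as below
 * (the offsets are assumptions for illustration).
 */
#if 0
ram_addr_t phys = IO_MEM_UNASSIGNED;
subpage_t *sp = subpage_init(base & TARGET_PAGE_MASK, &phys,
                             IO_MEM_UNASSIGNED, 0);
/* route bytes [0x100, 0x1ff] of the page to the "memory" handler */
subpage_register(sp, 0x100, 0x1ff, memory, region_offset);
#endif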
3502 db7b5426 blueswir1
3503 88715657 aliguori
static int get_free_io_mem_idx(void)
3504 88715657 aliguori
{
3505 88715657 aliguori
    int i;
3506 88715657 aliguori
3507 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3508 88715657 aliguori
        if (!io_mem_used[i]) {
3509 88715657 aliguori
            io_mem_used[i] = 1;
3510 88715657 aliguori
            return i;
3511 88715657 aliguori
        }
3512 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3513 88715657 aliguori
    return -1;
3514 88715657 aliguori
}
3515 88715657 aliguori
3516 dd310534 Alexander Graf
/*
3517 dd310534 Alexander Graf
 * Usually, devices operate in little endian mode. There are devices out
3518 dd310534 Alexander Graf
 * there that operate in big endian too. Each device gets byte-swapped
3519 dd310534 Alexander Graf
 * mmio if plugged onto a CPU of the other endianness.
3520 dd310534 Alexander Graf
 *
3521 dd310534 Alexander Graf
 * CPU          Device           swap?
3522 dd310534 Alexander Graf
 *
3523 dd310534 Alexander Graf
 * little       little           no
3524 dd310534 Alexander Graf
 * little       big              yes
3525 dd310534 Alexander Graf
 * big          little           yes
3526 dd310534 Alexander Graf
 * big          big              no
3527 dd310534 Alexander Graf
 */
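/* Editor's sketch of the table above (illustrative, not part of the
 * original file): on a little-endian target, registering a device as
 * DEVICE_BIG_ENDIAN makes swapendian_init() below wrap its handlers, so
 * every 16/32-bit access is byte swapped on the way through. "my_read"
 * and "my_write" are hypothetical handler tables.
 */
#if 0
int io = cpu_register_io_memory(my_read, my_write, opaque,
                                DEVICE_BIG_ENDIAN);
/* a guest stl of 0x12345678 now reaches my_write[2] as 0x78563412
   when TARGET_WORDS_BIGENDIAN is not defined */
#endif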
3528 dd310534 Alexander Graf
3529 dd310534 Alexander Graf
typedef struct SwapEndianContainer {
3530 dd310534 Alexander Graf
    CPUReadMemoryFunc *read[3];
3531 dd310534 Alexander Graf
    CPUWriteMemoryFunc *write[3];
3532 dd310534 Alexander Graf
    void *opaque;
3533 dd310534 Alexander Graf
} SwapEndianContainer;
3534 dd310534 Alexander Graf
3535 dd310534 Alexander Graf
static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3536 dd310534 Alexander Graf
{
3537 dd310534 Alexander Graf
    uint32_t val;
3538 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3539 dd310534 Alexander Graf
    val = c->read[0](c->opaque, addr);
3540 dd310534 Alexander Graf
    return val;
3541 dd310534 Alexander Graf
}
3542 dd310534 Alexander Graf
3543 dd310534 Alexander Graf
static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3544 dd310534 Alexander Graf
{
3545 dd310534 Alexander Graf
    uint32_t val;
3546 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3547 dd310534 Alexander Graf
    val = bswap16(c->read[1](c->opaque, addr));
3548 dd310534 Alexander Graf
    return val;
3549 dd310534 Alexander Graf
}
3550 dd310534 Alexander Graf
3551 dd310534 Alexander Graf
static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3552 dd310534 Alexander Graf
{
3553 dd310534 Alexander Graf
    uint32_t val;
3554 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3555 dd310534 Alexander Graf
    val = bswap32(c->read[2](c->opaque, addr));
3556 dd310534 Alexander Graf
    return val;
3557 dd310534 Alexander Graf
}
3558 dd310534 Alexander Graf
3559 dd310534 Alexander Graf
static CPUReadMemoryFunc * const swapendian_readfn[3]={
3560 dd310534 Alexander Graf
    swapendian_mem_readb,
3561 dd310534 Alexander Graf
    swapendian_mem_readw,
3562 dd310534 Alexander Graf
    swapendian_mem_readl
3563 dd310534 Alexander Graf
};
3564 dd310534 Alexander Graf
3565 dd310534 Alexander Graf
static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3566 dd310534 Alexander Graf
                                  uint32_t val)
3567 dd310534 Alexander Graf
{
3568 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3569 dd310534 Alexander Graf
    c->write[0](c->opaque, addr, val);
3570 dd310534 Alexander Graf
}
3571 dd310534 Alexander Graf
3572 dd310534 Alexander Graf
static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3573 dd310534 Alexander Graf
                                  uint32_t val)
3574 dd310534 Alexander Graf
{
3575 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3576 dd310534 Alexander Graf
    c->write[1](c->opaque, addr, bswap16(val));
3577 dd310534 Alexander Graf
}
3578 dd310534 Alexander Graf
3579 dd310534 Alexander Graf
static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3580 dd310534 Alexander Graf
                                  uint32_t val)
3581 dd310534 Alexander Graf
{
3582 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3583 dd310534 Alexander Graf
    c->write[2](c->opaque, addr, bswap32(val));
3584 dd310534 Alexander Graf
}
3585 dd310534 Alexander Graf
3586 dd310534 Alexander Graf
static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3587 dd310534 Alexander Graf
    swapendian_mem_writeb,
3588 dd310534 Alexander Graf
    swapendian_mem_writew,
3589 dd310534 Alexander Graf
    swapendian_mem_writel
3590 dd310534 Alexander Graf
};
3591 dd310534 Alexander Graf
3592 dd310534 Alexander Graf
static void swapendian_init(int io_index)
3593 dd310534 Alexander Graf
{
3594 dd310534 Alexander Graf
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3595 dd310534 Alexander Graf
    int i;
3596 dd310534 Alexander Graf
3597 dd310534 Alexander Graf
    /* Swap mmio for big endian targets */
3598 dd310534 Alexander Graf
    c->opaque = io_mem_opaque[io_index];
3599 dd310534 Alexander Graf
    for (i = 0; i < 3; i++) {
3600 dd310534 Alexander Graf
        c->read[i] = io_mem_read[io_index][i];
3601 dd310534 Alexander Graf
        c->write[i] = io_mem_write[io_index][i];
3602 dd310534 Alexander Graf
3603 dd310534 Alexander Graf
        io_mem_read[io_index][i] = swapendian_readfn[i];
3604 dd310534 Alexander Graf
        io_mem_write[io_index][i] = swapendian_writefn[i];
3605 dd310534 Alexander Graf
    }
3606 dd310534 Alexander Graf
    io_mem_opaque[io_index] = c;
3607 dd310534 Alexander Graf
}
3608 dd310534 Alexander Graf
3609 dd310534 Alexander Graf
static void swapendian_del(int io_index)
3610 dd310534 Alexander Graf
{
3611 dd310534 Alexander Graf
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3612 dd310534 Alexander Graf
        qemu_free(io_mem_opaque[io_index]);
3613 dd310534 Alexander Graf
    }
3614 dd310534 Alexander Graf
}
3615 dd310534 Alexander Graf
3616 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3617 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3618 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3619 3ee89922 blueswir1
   If io_index is non-zero, the corresponding io zone is
3620 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3621 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3622 4254fab8 blueswir1
   returned on error. */
3623 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
3624 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
3625 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
3626 dd310534 Alexander Graf
                                        void *opaque, enum device_endian endian)
3627 33417e70 bellard
{
3628 3cab721d Richard Henderson
    int i;
3629 3cab721d Richard Henderson
3630 33417e70 bellard
    if (io_index <= 0) {
3631 88715657 aliguori
        io_index = get_free_io_mem_idx();
3632 88715657 aliguori
        if (io_index == -1)
3633 88715657 aliguori
            return io_index;
3634 33417e70 bellard
    } else {
3635 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
3636 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3637 33417e70 bellard
            return -1;
3638 33417e70 bellard
    }
3639 b5ff1b31 bellard
3640 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3641 3cab721d Richard Henderson
        io_mem_read[io_index][i]
3642 3cab721d Richard Henderson
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3643 3cab721d Richard Henderson
    }
3644 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3645 3cab721d Richard Henderson
        io_mem_write[io_index][i]
3646 3cab721d Richard Henderson
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3647 3cab721d Richard Henderson
    }
3648 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3649 f6405247 Richard Henderson
3650 dd310534 Alexander Graf
    switch (endian) {
3651 dd310534 Alexander Graf
    case DEVICE_BIG_ENDIAN:
3652 dd310534 Alexander Graf
#ifndef TARGET_WORDS_BIGENDIAN
3653 dd310534 Alexander Graf
        swapendian_init(io_index);
3654 dd310534 Alexander Graf
#endif
3655 dd310534 Alexander Graf
        break;
3656 dd310534 Alexander Graf
    case DEVICE_LITTLE_ENDIAN:
3657 dd310534 Alexander Graf
#ifdef TARGET_WORDS_BIGENDIAN
3658 dd310534 Alexander Graf
        swapendian_init(io_index);
3659 dd310534 Alexander Graf
#endif
3660 dd310534 Alexander Graf
        break;
3661 dd310534 Alexander Graf
    case DEVICE_NATIVE_ENDIAN:
3662 dd310534 Alexander Graf
    default:
3663 dd310534 Alexander Graf
        break;
3664 dd310534 Alexander Graf
    }
3665 dd310534 Alexander Graf
3666 f6405247 Richard Henderson
    return (io_index << IO_MEM_SHIFT);
3667 33417e70 bellard
}
3668 61382a50 bellard
3669 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3670 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3671 dd310534 Alexander Graf
                           void *opaque, enum device_endian endian)
3672 1eed09cb Avi Kivity
{
3673 2507c12a Alexander Graf
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
3674 1eed09cb Avi Kivity
}
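/* Editor's sketch (illustrative, not part of the original file): the
 * typical registration lifecycle described in the comment above
 * cpu_register_io_memory_fixed(). The handler tables, opaque pointer,
 * address and size are assumptions.
 */
#if 0
int io = cpu_register_io_memory(my_read, my_write, s,
                                DEVICE_NATIVE_ENDIAN);
if (io == -1) {
    /* all IO_MEM_NB_ENTRIES slots are in use */
}
/* back a page-aligned guest-physical range with the new handlers */
cpu_register_physical_memory(0x10000000, 0x1000, io);
/* on device teardown: */
cpu_unregister_io_memory(io);
#endif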
3675 1eed09cb Avi Kivity
3676 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3677 88715657 aliguori
{
3678 88715657 aliguori
    int i;
3679 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3680 88715657 aliguori
3681 dd310534 Alexander Graf
    swapendian_del(io_index);
3682 dd310534 Alexander Graf
3683 88715657 aliguori
    for (i=0;i < 3; i++) {
3684 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3685 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3686 88715657 aliguori
    }
3687 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3688 88715657 aliguori
    io_mem_used[io_index] = 0;
3689 88715657 aliguori
}
3690 88715657 aliguori
3691 e9179ce1 Avi Kivity
static void io_mem_init(void)
3692 e9179ce1 Avi Kivity
{
3693 e9179ce1 Avi Kivity
    int i;
3694 e9179ce1 Avi Kivity
3695 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3696 2507c12a Alexander Graf
                                 unassigned_mem_write, NULL,
3697 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3698 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3699 2507c12a Alexander Graf
                                 unassigned_mem_write, NULL,
3700 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3701 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3702 2507c12a Alexander Graf
                                 notdirty_mem_write, NULL,
3703 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3704 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3705 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3706 e9179ce1 Avi Kivity
3707 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3708 2507c12a Alexander Graf
                                          watch_mem_write, NULL,
3709 2507c12a Alexander Graf
                                          DEVICE_NATIVE_ENDIAN);
3710 e9179ce1 Avi Kivity
}
3711 e9179ce1 Avi Kivity
3712 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3713 e2eef170 pbrook
3714 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3715 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3716 a68fe89c Paul Brook
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3717 a68fe89c Paul Brook
                        uint8_t *buf, int len, int is_write)
3718 13eb76e0 bellard
{
3719 13eb76e0 bellard
    int l, flags;
3720 13eb76e0 bellard
    target_ulong page;
3721 53a5960a pbrook
    void * p;
3722 13eb76e0 bellard
3723 13eb76e0 bellard
    while (len > 0) {
3724 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3725 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3726 13eb76e0 bellard
        if (l > len)
3727 13eb76e0 bellard
            l = len;
3728 13eb76e0 bellard
        flags = page_get_flags(page);
3729 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
3730 a68fe89c Paul Brook
            return -1;
3731 13eb76e0 bellard
        if (is_write) {
3732 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3733 a68fe89c Paul Brook
                return -1;
3734 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3735 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3736 a68fe89c Paul Brook
                return -1;
3737 72fb7daa aurel32
            memcpy(p, buf, l);
3738 72fb7daa aurel32
            unlock_user(p, addr, l);
3739 13eb76e0 bellard
        } else {
3740 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3741 a68fe89c Paul Brook
                return -1;
3742 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3743 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3744 a68fe89c Paul Brook
                return -1;
3745 72fb7daa aurel32
            memcpy(buf, p, l);
3746 5b257578 aurel32
            unlock_user(p, addr, 0);
3747 13eb76e0 bellard
        }
3748 13eb76e0 bellard
        len -= l;
3749 13eb76e0 bellard
        buf += l;
3750 13eb76e0 bellard
        addr += l;
3751 13eb76e0 bellard
    }
3752 a68fe89c Paul Brook
    return 0;
3753 13eb76e0 bellard
}
3754 8df1cd07 bellard
3755 13eb76e0 bellard
#else
3756 c227f099 Anthony Liguori
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3757 13eb76e0 bellard
                            int len, int is_write)
3758 13eb76e0 bellard
{
3759 13eb76e0 bellard
    int l, io_index;
3760 13eb76e0 bellard
    uint8_t *ptr;
3761 13eb76e0 bellard
    uint32_t val;
3762 c227f099 Anthony Liguori
    target_phys_addr_t page;
3763 2e12669a bellard
    unsigned long pd;
3764 92e873b9 bellard
    PhysPageDesc *p;
3765 3b46e624 ths
3766 13eb76e0 bellard
    while (len > 0) {
3767 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3768 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3769 13eb76e0 bellard
        if (l > len)
3770 13eb76e0 bellard
            l = len;
3771 92e873b9 bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3772 13eb76e0 bellard
        if (!p) {
3773 13eb76e0 bellard
            pd = IO_MEM_UNASSIGNED;
3774 13eb76e0 bellard
        } else {
3775 13eb76e0 bellard
            pd = p->phys_offset;
3776 13eb76e0 bellard
        }
3777 3b46e624 ths
3778 13eb76e0 bellard
        if (is_write) {
3779 3a7d929e bellard
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3780 c227f099 Anthony Liguori
                target_phys_addr_t addr1 = addr;
3781 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3782 8da3ff18 pbrook
                if (p)
3783 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3784 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
3785 6a00d601 bellard
                   potential bugs */
3786 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3787 1c213d19 bellard
                    /* 32 bit write access */
3788 c27004ec bellard
                    val = ldl_p(buf);
3789 6c2934db aurel32
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3790 13eb76e0 bellard
                    l = 4;
3791 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3792 1c213d19 bellard
                    /* 16 bit write access */
3793 c27004ec bellard
                    val = lduw_p(buf);
3794 6c2934db aurel32
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3795 13eb76e0 bellard
                    l = 2;
3796 13eb76e0 bellard
                } else {
3797 1c213d19 bellard
                    /* 8 bit write access */
3798 c27004ec bellard
                    val = ldub_p(buf);
3799 6c2934db aurel32
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3800 13eb76e0 bellard
                    l = 1;
3801 13eb76e0 bellard
                }
3802 13eb76e0 bellard
            } else {
3803 b448f2f3 bellard
                unsigned long addr1;
3804 b448f2f3 bellard
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3805 13eb76e0 bellard
                /* RAM case */
3806 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
3807 13eb76e0 bellard
                memcpy(ptr, buf, l);
3808 3a7d929e bellard
                if (!cpu_physical_memory_is_dirty(addr1)) {
3809 3a7d929e bellard
                    /* invalidate code */
3810 3a7d929e bellard
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3811 3a7d929e bellard
                    /* set dirty bit */
3812 f7c11b53 Yoshiaki Tamura
                    cpu_physical_memory_set_dirty_flags(
3813 f7c11b53 Yoshiaki Tamura
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
3814 3a7d929e bellard
                }
3815 13eb76e0 bellard
            }
3816 13eb76e0 bellard
        } else {
3817 5fafdf24 ths
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3818 2a4188a3 bellard
                !(pd & IO_MEM_ROMD)) {
3819 c227f099 Anthony Liguori
                target_phys_addr_t addr1 = addr;
3820 13eb76e0 bellard
                /* I/O case */
3821 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3822 8da3ff18 pbrook
                if (p)
3823 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3824 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3825 13eb76e0 bellard
                    /* 32 bit read access */
3826 6c2934db aurel32
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3827 c27004ec bellard
                    stl_p(buf, val);
3828 13eb76e0 bellard
                    l = 4;
3829 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3830 13eb76e0 bellard
                    /* 16 bit read access */
3831 6c2934db aurel32
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3832 c27004ec bellard
                    stw_p(buf, val);
3833 13eb76e0 bellard
                    l = 2;
3834 13eb76e0 bellard
                } else {
3835 1c213d19 bellard
                    /* 8 bit read access */
3836 6c2934db aurel32
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3837 c27004ec bellard
                    stb_p(buf, val);
3838 13eb76e0 bellard
                    l = 1;
3839 13eb76e0 bellard
                }
3840 13eb76e0 bellard
            } else {
3841 13eb76e0 bellard
                /* RAM case */
3842 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3843 13eb76e0 bellard
                    (addr & ~TARGET_PAGE_MASK);
3844 13eb76e0 bellard
                memcpy(buf, ptr, l);
3845 13eb76e0 bellard
            }
3846 13eb76e0 bellard
        }
3847 13eb76e0 bellard
        len -= l;
3848 13eb76e0 bellard
        buf += l;
3849 13eb76e0 bellard
        addr += l;
3850 13eb76e0 bellard
    }
3851 13eb76e0 bellard
}
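/* Editor's sketch (illustrative, not part of the original file): callers
 * normally use the cpu_physical_memory_read()/write() wrappers around the
 * routine above; the guest-physical address is an assumption.
 */
#if 0
uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
cpu_physical_memory_write(0x1000, buf, sizeof(buf)); /* host -> guest */
cpu_physical_memory_read(0x1000, buf, sizeof(buf));  /* guest -> host */
#endif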
3852 8df1cd07 bellard
3853 d0ecd2aa bellard
/* used for ROM loading: can write in RAM and ROM */
3854 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3855 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3856 d0ecd2aa bellard
{
3857 d0ecd2aa bellard
    int l;
3858 d0ecd2aa bellard
    uint8_t *ptr;
3859 c227f099 Anthony Liguori
    target_phys_addr_t page;
3860 d0ecd2aa bellard
    unsigned long pd;
3861 d0ecd2aa bellard
    PhysPageDesc *p;
3862 3b46e624 ths
3863 d0ecd2aa bellard
    while (len > 0) {
3864 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3865 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3866 d0ecd2aa bellard
        if (l > len)
3867 d0ecd2aa bellard
            l = len;
3868 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3869 d0ecd2aa bellard
        if (!p) {
3870 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3871 d0ecd2aa bellard
        } else {
3872 d0ecd2aa bellard
            pd = p->phys_offset;
3873 d0ecd2aa bellard
        }
3874 3b46e624 ths
3875 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3876 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3877 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3878 d0ecd2aa bellard
            /* do nothing */
3879 d0ecd2aa bellard
        } else {
3880 d0ecd2aa bellard
            unsigned long addr1;
3881 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3882 d0ecd2aa bellard
            /* ROM/RAM case */
3883 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3884 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3885 d0ecd2aa bellard
        }
3886 d0ecd2aa bellard
        len -= l;
3887 d0ecd2aa bellard
        buf += l;
3888 d0ecd2aa bellard
        addr += l;
3889 d0ecd2aa bellard
    }
3890 d0ecd2aa bellard
}
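/* Editor's sketch (illustrative, not part of the original file): firmware
 * loaders rely on this to copy an image into a region that may already be
 * registered as ROM, which cpu_physical_memory_rw() would refuse to
 * write. The address, buffer and size are assumptions.
 */
#if 0
cpu_physical_memory_write_rom(0xfffc0000, bios_image, bios_size);
#endif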
3891 d0ecd2aa bellard
3892 6d16c2f8 aliguori
typedef struct {
3893 6d16c2f8 aliguori
    void *buffer;
3894 c227f099 Anthony Liguori
    target_phys_addr_t addr;
3895 c227f099 Anthony Liguori
    target_phys_addr_t len;
3896 6d16c2f8 aliguori
} BounceBuffer;
3897 6d16c2f8 aliguori
3898 6d16c2f8 aliguori
static BounceBuffer bounce;
3899 6d16c2f8 aliguori
3900 ba223c29 aliguori
typedef struct MapClient {
3901 ba223c29 aliguori
    void *opaque;
3902 ba223c29 aliguori
    void (*callback)(void *opaque);
3903 72cf2d4f Blue Swirl
    QLIST_ENTRY(MapClient) link;
3904 ba223c29 aliguori
} MapClient;
3905 ba223c29 aliguori
3906 72cf2d4f Blue Swirl
static QLIST_HEAD(map_client_list, MapClient) map_client_list
3907 72cf2d4f Blue Swirl
    = QLIST_HEAD_INITIALIZER(map_client_list);
3908 ba223c29 aliguori
3909 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3910 ba223c29 aliguori
{
3911 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3912 ba223c29 aliguori
3913 ba223c29 aliguori
    client->opaque = opaque;
3914 ba223c29 aliguori
    client->callback = callback;
3915 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3916 ba223c29 aliguori
    return client;
3917 ba223c29 aliguori
}
3918 ba223c29 aliguori
3919 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3920 ba223c29 aliguori
{
3921 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3922 ba223c29 aliguori
3923 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3924 34d5e948 Isaku Yamahata
    qemu_free(client);
3925 ba223c29 aliguori
}
3926 ba223c29 aliguori
3927 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3928 ba223c29 aliguori
{
3929 ba223c29 aliguori
    MapClient *client;
3930 ba223c29 aliguori
3931 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3932 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3933 ba223c29 aliguori
        client->callback(client->opaque);
3934 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3935 ba223c29 aliguori
    }
3936 ba223c29 aliguori
}
3937 ba223c29 aliguori
3938 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3939 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3940 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3941 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3942 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
3943 ba223c29 aliguori
 * likely to succeed.
3944 6d16c2f8 aliguori
 */
3945 c227f099 Anthony Liguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
3946 c227f099 Anthony Liguori
                              target_phys_addr_t *plen,
3947 6d16c2f8 aliguori
                              int is_write)
3948 6d16c2f8 aliguori
{
3949 c227f099 Anthony Liguori
    target_phys_addr_t len = *plen;
3950 c227f099 Anthony Liguori
    target_phys_addr_t done = 0;
3951 6d16c2f8 aliguori
    int l;
3952 6d16c2f8 aliguori
    uint8_t *ret = NULL;
3953 6d16c2f8 aliguori
    uint8_t *ptr;
3954 c227f099 Anthony Liguori
    target_phys_addr_t page;
3955 6d16c2f8 aliguori
    unsigned long pd;
3956 6d16c2f8 aliguori
    PhysPageDesc *p;
3957 6d16c2f8 aliguori
    unsigned long addr1;
3958 6d16c2f8 aliguori
3959 6d16c2f8 aliguori
    while (len > 0) {
3960 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
3961 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
3962 6d16c2f8 aliguori
        if (l > len)
3963 6d16c2f8 aliguori
            l = len;
3964 6d16c2f8 aliguori
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3965 6d16c2f8 aliguori
        if (!p) {
3966 6d16c2f8 aliguori
            pd = IO_MEM_UNASSIGNED;
3967 6d16c2f8 aliguori
        } else {
3968 6d16c2f8 aliguori
            pd = p->phys_offset;
3969 6d16c2f8 aliguori
        }
3970 6d16c2f8 aliguori
3971 6d16c2f8 aliguori
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3972 6d16c2f8 aliguori
            if (done || bounce.buffer) {
3973 6d16c2f8 aliguori
                break;
3974 6d16c2f8 aliguori
            }
3975 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3976 6d16c2f8 aliguori
            bounce.addr = addr;
3977 6d16c2f8 aliguori
            bounce.len = l;
3978 6d16c2f8 aliguori
            if (!is_write) {
3979 54f7b4a3 Stefan Weil
                cpu_physical_memory_read(addr, bounce.buffer, l);
3980 6d16c2f8 aliguori
            }
3981 6d16c2f8 aliguori
            ptr = bounce.buffer;
3982 6d16c2f8 aliguori
        } else {
3983 6d16c2f8 aliguori
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3984 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3985 6d16c2f8 aliguori
        }
3986 6d16c2f8 aliguori
        if (!done) {
3987 6d16c2f8 aliguori
            ret = ptr;
3988 6d16c2f8 aliguori
        } else if (ret + done != ptr) {
3989 6d16c2f8 aliguori
            break;
3990 6d16c2f8 aliguori
        }
3991 6d16c2f8 aliguori
3992 6d16c2f8 aliguori
        len -= l;
3993 6d16c2f8 aliguori
        addr += l;
3994 6d16c2f8 aliguori
        done += l;
3995 6d16c2f8 aliguori
    }
3996 6d16c2f8 aliguori
    *plen = done;
3997 6d16c2f8 aliguori
    return ret;
3998 6d16c2f8 aliguori
}
3999 6d16c2f8 aliguori
4000 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4001 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
4002 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
4003 6d16c2f8 aliguori
 */
4004 c227f099 Anthony Liguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4005 c227f099 Anthony Liguori
                               int is_write, target_phys_addr_t access_len)
4006 6d16c2f8 aliguori
{
4007 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
4008 6d16c2f8 aliguori
        if (is_write) {
4009 e890261f Marcelo Tosatti
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
4010 6d16c2f8 aliguori
            while (access_len) {
4011 6d16c2f8 aliguori
                unsigned l;
4012 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
4013 6d16c2f8 aliguori
                if (l > access_len)
4014 6d16c2f8 aliguori
                    l = access_len;
4015 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
4016 6d16c2f8 aliguori
                    /* invalidate code */
4017 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4018 6d16c2f8 aliguori
                    /* set dirty bit */
4019 f7c11b53 Yoshiaki Tamura
                    cpu_physical_memory_set_dirty_flags(
4020 f7c11b53 Yoshiaki Tamura
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
4021 6d16c2f8 aliguori
                }
4022 6d16c2f8 aliguori
                addr1 += l;
4023 6d16c2f8 aliguori
                access_len -= l;
4024 6d16c2f8 aliguori
            }
4025 6d16c2f8 aliguori
        }
4026 6d16c2f8 aliguori
        return;
4027 6d16c2f8 aliguori
    }
4028 6d16c2f8 aliguori
    if (is_write) {
4029 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4030 6d16c2f8 aliguori
    }
4031 f8a83245 Herve Poussineau
    qemu_vfree(bounce.buffer);
4032 6d16c2f8 aliguori
    bounce.buffer = NULL;
4033 ba223c29 aliguori
    cpu_notify_map_clients();
4034 6d16c2f8 aliguori
}
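/* Editor's sketch of the map/unmap contract documented above
 * (illustrative, not part of the original file): map, perform a single
 * direction of I/O, then unmap with the length actually touched. "addr",
 * "s" and the retry callback are assumptions.
 */
#if 0
target_phys_addr_t len = 4096;
void *p = cpu_physical_memory_map(addr, &len, 1 /* is_write */);
if (p) {
    memset(p, 0, len);   /* len may come back smaller than requested */
    cpu_physical_memory_unmap(p, len, 1, len);
} else {
    /* bounce buffer busy: ask to be notified when a retry may succeed */
    cpu_register_map_client(s, retry_map_cb);
}
#endif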
4035 d0ecd2aa bellard
4036 8df1cd07 bellard
/* warning: addr must be aligned */
4037 c227f099 Anthony Liguori
uint32_t ldl_phys(target_phys_addr_t addr)
4038 8df1cd07 bellard
{
4039 8df1cd07 bellard
    int io_index;
4040 8df1cd07 bellard
    uint8_t *ptr;
4041 8df1cd07 bellard
    uint32_t val;
4042 8df1cd07 bellard
    unsigned long pd;
4043 8df1cd07 bellard
    PhysPageDesc *p;
4044 8df1cd07 bellard
4045 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4046 8df1cd07 bellard
    if (!p) {
4047 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
4048 8df1cd07 bellard
    } else {
4049 8df1cd07 bellard
        pd = p->phys_offset;
4050 8df1cd07 bellard
    }
4051 3b46e624 ths
4052 5fafdf24 ths
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4053 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
4054 8df1cd07 bellard
        /* I/O case */
4055 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4056 8da3ff18 pbrook
        if (p)
4057 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4058 8df1cd07 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4059 8df1cd07 bellard
    } else {
4060 8df1cd07 bellard
        /* RAM case */
4061 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4062 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
4063 8df1cd07 bellard
        val = ldl_p(ptr);
4064 8df1cd07 bellard
    }
4065 8df1cd07 bellard
    return val;
4066 8df1cd07 bellard
}
4067 8df1cd07 bellard
4068 84b7b8e7 bellard
/* warning: addr must be aligned */
4069 c227f099 Anthony Liguori
uint64_t ldq_phys(target_phys_addr_t addr)
4070 84b7b8e7 bellard
{
4071 84b7b8e7 bellard
    int io_index;
4072 84b7b8e7 bellard
    uint8_t *ptr;
4073 84b7b8e7 bellard
    uint64_t val;
4074 84b7b8e7 bellard
    unsigned long pd;
4075 84b7b8e7 bellard
    PhysPageDesc *p;
4076 84b7b8e7 bellard
4077 84b7b8e7 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4078 84b7b8e7 bellard
    if (!p) {
4079 84b7b8e7 bellard
        pd = IO_MEM_UNASSIGNED;
4080 84b7b8e7 bellard
    } else {
4081 84b7b8e7 bellard
        pd = p->phys_offset;
4082 84b7b8e7 bellard
    }
4083 3b46e624 ths
4084 2a4188a3 bellard
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4085 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
4086 84b7b8e7 bellard
        /* I/O case */
4087 84b7b8e7 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4088 8da3ff18 pbrook
        if (p)
4089 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4090 84b7b8e7 bellard
#ifdef TARGET_WORDS_BIGENDIAN
4091 84b7b8e7 bellard
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4092 84b7b8e7 bellard
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4093 84b7b8e7 bellard
#else
4094 84b7b8e7 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4095 84b7b8e7 bellard
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4096 84b7b8e7 bellard
#endif
4097 84b7b8e7 bellard
    } else {
4098 84b7b8e7 bellard
        /* RAM case */
4099 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4100 84b7b8e7 bellard
            (addr & ~TARGET_PAGE_MASK);
4101 84b7b8e7 bellard
        val = ldq_p(ptr);
4102 84b7b8e7 bellard
    }
4103 84b7b8e7 bellard
    return val;
4104 84b7b8e7 bellard
}
4105 84b7b8e7 bellard
4106 aab33094 bellard
/* XXX: optimize */
4107 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
4108 aab33094 bellard
{
4109 aab33094 bellard
    uint8_t val;
4110 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
4111 aab33094 bellard
    return val;
4112 aab33094 bellard
}
4113 aab33094 bellard
4114 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
4115 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
4116 aab33094 bellard
{
4117 733f0b02 Michael S. Tsirkin
    int io_index;
4118 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
4119 733f0b02 Michael S. Tsirkin
    uint64_t val;
4120 733f0b02 Michael S. Tsirkin
    unsigned long pd;
4121 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
4122 733f0b02 Michael S. Tsirkin
4123 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4124 733f0b02 Michael S. Tsirkin
    if (!p) {
4125 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
4126 733f0b02 Michael S. Tsirkin
    } else {
4127 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
4128 733f0b02 Michael S. Tsirkin
    }
4129 733f0b02 Michael S. Tsirkin
4130 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4131 733f0b02 Michael S. Tsirkin
        !(pd & IO_MEM_ROMD)) {
4132 733f0b02 Michael S. Tsirkin
        /* I/O case */
4133 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4134 733f0b02 Michael S. Tsirkin
        if (p)
4135 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4136 733f0b02 Michael S. Tsirkin
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4137 733f0b02 Michael S. Tsirkin
    } else {
4138 733f0b02 Michael S. Tsirkin
        /* RAM case */
4139 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4140 733f0b02 Michael S. Tsirkin
            (addr & ~TARGET_PAGE_MASK);
4141 733f0b02 Michael S. Tsirkin
        val = lduw_p(ptr);
4142 733f0b02 Michael S. Tsirkin
    }
4143 733f0b02 Michael S. Tsirkin
    return val;
4144 aab33094 bellard
}
4145 aab33094 bellard
4146 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not marked as dirty
4147 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
4148 8df1cd07 bellard
   bits are used to track modified PTEs */
4149 c227f099 Anthony Liguori
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4150 8df1cd07 bellard
{
4151 8df1cd07 bellard
    int io_index;
4152 8df1cd07 bellard
    uint8_t *ptr;
4153 8df1cd07 bellard
    unsigned long pd;
4154 8df1cd07 bellard
    PhysPageDesc *p;
4155 8df1cd07 bellard
4156 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4157 8df1cd07 bellard
    if (!p) {
4158 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
4159 8df1cd07 bellard
    } else {
4160 8df1cd07 bellard
        pd = p->phys_offset;
4161 8df1cd07 bellard
    }
4162 3b46e624 ths
4163 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4164 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4165 8da3ff18 pbrook
        if (p)
4166 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4167 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4168 8df1cd07 bellard
    } else {
4169 74576198 aliguori
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4170 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
4171 8df1cd07 bellard
        stl_p(ptr, val);
4172 74576198 aliguori
4173 74576198 aliguori
        if (unlikely(in_migration)) {
4174 74576198 aliguori
            if (!cpu_physical_memory_is_dirty(addr1)) {
4175 74576198 aliguori
                /* invalidate code */
4176 74576198 aliguori
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4177 74576198 aliguori
                /* set dirty bit */
4178 f7c11b53 Yoshiaki Tamura
                cpu_physical_memory_set_dirty_flags(
4179 f7c11b53 Yoshiaki Tamura
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
4180 74576198 aliguori
            }
4181 74576198 aliguori
        }
4182 8df1cd07 bellard
    }
4183 8df1cd07 bellard
}
4184 8df1cd07 bellard
4185 c227f099 Anthony Liguori
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

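/* Editor's sketch (hypothetical device, not in the original file): unlike
   the _notdirty variant above, stl_phys is the right call for ordinary
   device stores into guest memory, since it invalidates any translated
   code on the page and updates the dirty bitmap. */
static void example_dev_post_status(target_phys_addr_t status_addr)
{
    stl_phys(status_addr, 1);   /* e.g. flag a (made-up) request as done */
}
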
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

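/* Editor's sketch: one way the XXX above could be resolved, mirroring
   stl_phys for the RAM path (a single 64-bit store plus the dirty/code
   handling) and falling back to the generic byte path for I/O regions.
   This illustrates the pattern used by the other helpers in this file;
   it is not a change the authors made. */
static void example_stq_phys_direct(target_phys_addr_t addr, uint64_t val)
{
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: keep the existing generic path */
        val = tswap64(val);
        cpu_physical_memory_write(addr, &val, 8);
    } else {
        /* RAM case: direct store, then the usual invalidate/dirty dance */
        unsigned long addr1 = (pd & TARGET_PAGE_MASK)
            + (addr & ~TARGET_PAGE_MASK);
        stq_p(qemu_get_ram_ptr(addr1), val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            tb_invalidate_phys_page_range(addr1, addr1 + 8, 0);
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
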
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

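/* Editor's sketch (hypothetical caller, not in the original file):
   debugger-style code such as the gdb stub goes through
   cpu_memory_rw_debug because it translates guest-virtual addresses via
   cpu_get_phys_page_debug and may write into ROM.  Reading one guest
   word might look like this. */
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;          /* some page in the range was not mapped */
    *out = ldl_p(buf);      /* interpret the bytes in target order */
    return 0;
}
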
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
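
/* Editor's note: in the surrounding tree this function backs the
   monitor's "info jit" output; a caller just supplies a FILE plus a
   matching fprintf-style callback.  A minimal (hypothetical) invocation: */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stdout, fprintf);    /* fprintf matches fprintf_function */
}
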
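/* Editor's note: the block below is the usual QEMU template trick for
   stamping out the code-fetch ("_cmmu") soft-MMU load helpers.  Each
   SHIFT value (0..3, i.e. 8/16/32/64-bit accesses) re-includes
   softmmu_template.h, which combines MMUSUFFIX and SOFTMMU_CODE_ACCESS
   to generate the load routines used when fetching guest code rather
   than data. */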
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif