Statistics
| Branch: | Revision:

root / exec.c @ 42138043

History | View | Annotate | Download (133.3 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
27 055403b2 Stefan Weil
#include "qemu-common.h"
28 6180a181 bellard
#include "cpu.h"
29 6180a181 bellard
#include "exec-all.h"
30 b67d9a52 bellard
#include "tcg.h"
31 b3c7724c pbrook
#include "hw/hw.h"
32 cc9e98cb Alex Williamson
#include "hw/qdev.h"
33 74576198 aliguori
#include "osdep.h"
34 7ba1e619 aliguori
#include "kvm.h"
35 432d268c Jun Nakajima
#include "hw/xen.h"
36 29e922b6 Blue Swirl
#include "qemu-timer.h"
37 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
38 53a5960a pbrook
#include <qemu.h>
39 fd052bf6 Riku Voipio
#include <signal.h>
40 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 f01576f1 Juergen Lock
#include <sys/param.h>
42 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
43 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
44 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
45 f01576f1 Juergen Lock
#include <sys/time.h>
46 f01576f1 Juergen Lock
#include <sys/proc.h>
47 f01576f1 Juergen Lock
#include <machine/profile.h>
48 f01576f1 Juergen Lock
#define _KERNEL
49 f01576f1 Juergen Lock
#include <sys/user.h>
50 f01576f1 Juergen Lock
#undef _KERNEL
51 f01576f1 Juergen Lock
#undef sigqueue
52 f01576f1 Juergen Lock
#include <libutil.h>
53 f01576f1 Juergen Lock
#endif
54 f01576f1 Juergen Lock
#endif
55 432d268c Jun Nakajima
#else /* !CONFIG_USER_ONLY */
56 432d268c Jun Nakajima
#include "xen-mapcache.h"
57 53a5960a pbrook
#endif
58 54936004 bellard
59 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
60 66e85a21 bellard
//#define DEBUG_FLUSH
61 9fa3e853 bellard
//#define DEBUG_TLB
62 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
63 fd6ce8f6 bellard
64 fd6ce8f6 bellard
/* make various TB consistency checks */
65 5fafdf24 ths
//#define DEBUG_TB_CHECK
66 5fafdf24 ths
//#define DEBUG_TLB_CHECK
67 fd6ce8f6 bellard
68 1196be37 ths
//#define DEBUG_IOPORT
69 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
70 1196be37 ths
71 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
72 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
73 99773bd4 pbrook
#undef DEBUG_TB_CHECK
74 99773bd4 pbrook
#endif
75 99773bd4 pbrook
76 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
77 9fa3e853 bellard
78 bdaf78e0 blueswir1
static TranslationBlock *tbs;
79 24ab68ac Stefan Weil
static int code_gen_max_blocks;
80 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
81 bdaf78e0 blueswir1
static int nb_tbs;
82 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
83 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
84 fd6ce8f6 bellard
85 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
86 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
87 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
88 d03d860b blueswir1
 section close to code segment. */
89 d03d860b blueswir1
#define code_gen_section                                \
90 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
91 d03d860b blueswir1
    __attribute__((aligned (32)))
92 f8e2af11 Stefan Weil
#elif defined(_WIN32)
93 f8e2af11 Stefan Weil
/* Maximum alignment for Win32 is 16. */
94 f8e2af11 Stefan Weil
#define code_gen_section                                \
95 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
96 d03d860b blueswir1
#else
97 d03d860b blueswir1
#define code_gen_section                                \
98 d03d860b blueswir1
    __attribute__((aligned (32)))
99 d03d860b blueswir1
#endif
100 d03d860b blueswir1
101 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
102 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
103 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
104 26a5f13b bellard
/* threshold to flush the translated code buffer */
105 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
106 24ab68ac Stefan Weil
static uint8_t *code_gen_ptr;
107 fd6ce8f6 bellard
108 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
109 9fa3e853 bellard
int phys_ram_fd;
110 74576198 aliguori
static int in_migration;
111 94a6b54f pbrook
112 f471a17e Alex Williamson
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
113 e2eef170 pbrook
#endif
114 9fa3e853 bellard
115 6a00d601 bellard
CPUState *first_cpu;
116 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
117 6a00d601 bellard
   cpu_exec() */
118 5fafdf24 ths
CPUState *cpu_single_env;
119 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
120 bf20dc07 ths
   1 = Precise instruction counting.
121 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
122 2e70f6ef pbrook
int use_icount = 0;
123 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
124 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
125 2e70f6ef pbrook
int64_t qemu_icount;
126 6a00d601 bellard
127 54936004 bellard
/* Per-guest-page descriptor used by the translator to track the
   translated blocks (TBs) that intersect one target page. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* lazily-built bitmap of page regions containing code; presumably
       allocated once code_write_count reaches SMC_BITMAP_USE_THRESHOLD
       (defined above) -- confirm against the users of this field */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* page protection/status flags (PAGE_*) for user-mode emulation */
    unsigned long flags;
#endif
} PageDesc;
138 54936004 bellard
139 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
140 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
141 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
142 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
143 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
144 41c1b1c9 Paul Brook
#else
145 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
146 41c1b1c9 Paul Brook
#endif
147 bedb69ea j_mayer
#else
148 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
149 bedb69ea j_mayer
#endif
150 54936004 bellard
151 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
152 5cd2c5b6 Richard Henderson
#define L2_BITS 10
153 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
154 54936004 bellard
155 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
156 5cd2c5b6 Richard Henderson
#define P_L1_BITS_REM \
157 5cd2c5b6 Richard Henderson
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
158 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
159 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
160 5cd2c5b6 Richard Henderson
161 5cd2c5b6 Richard Henderson
/* Size of the L1 page table.  Avoid silly small sizes.  */
162 5cd2c5b6 Richard Henderson
#if P_L1_BITS_REM < 4
163 5cd2c5b6 Richard Henderson
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
164 5cd2c5b6 Richard Henderson
#else
165 5cd2c5b6 Richard Henderson
#define P_L1_BITS  P_L1_BITS_REM
166 5cd2c5b6 Richard Henderson
#endif
167 5cd2c5b6 Richard Henderson
168 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
169 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
170 5cd2c5b6 Richard Henderson
#else
171 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
172 5cd2c5b6 Richard Henderson
#endif
173 5cd2c5b6 Richard Henderson
174 5cd2c5b6 Richard Henderson
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
175 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
176 5cd2c5b6 Richard Henderson
177 5cd2c5b6 Richard Henderson
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
178 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
179 5cd2c5b6 Richard Henderson
180 83fb7adf bellard
unsigned long qemu_real_host_page_size;
181 83fb7adf bellard
unsigned long qemu_host_page_bits;
182 83fb7adf bellard
unsigned long qemu_host_page_size;
183 83fb7adf bellard
unsigned long qemu_host_page_mask;
184 54936004 bellard
185 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
186 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
187 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
188 54936004 bellard
189 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
190 41c1b1c9 Paul Brook
/* Descriptor for one page of guest physical address space. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* offset of the page within its memory region; initialized by
       phys_page_find_alloc to the page's own physical address */
    ram_addr_t region_offset;
} PhysPageDesc;
195 41c1b1c9 Paul Brook
196 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
197 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
198 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
199 6d9a1304 Paul Brook
200 e2eef170 pbrook
static void io_mem_init(void);
201 e2eef170 pbrook
202 33417e70 bellard
/* io memory support */
203 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
204 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
205 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
206 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
207 6658ffb8 pbrook
static int io_mem_watch;
208 6658ffb8 pbrook
#endif
209 33417e70 bellard
210 34865134 bellard
/* log support */
211 1e8b27ca Juha Riihimäki
#ifdef WIN32
212 1e8b27ca Juha Riihimäki
static const char *logfilename = "qemu.log";
213 1e8b27ca Juha Riihimäki
#else
214 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
215 1e8b27ca Juha Riihimäki
#endif
216 34865134 bellard
FILE *logfile;
217 34865134 bellard
int loglevel;
218 e735b91c pbrook
static int log_append = 0;
219 34865134 bellard
220 e3db7226 bellard
/* statistics */
221 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
222 e3db7226 bellard
static int tlb_flush_count;
223 b3755a91 Paul Brook
#endif
224 e3db7226 bellard
static int tb_flush_count;
225 e3db7226 bellard
static int tb_phys_invalidate_count;
226 e3db7226 bellard
227 7cb69cae bellard
#ifdef _WIN32
/* Make the host memory range [addr, addr+size) executable.
   Win32 flavour: flip the page protection via VirtualProtect. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make the host memory range [addr, addr+size) executable.
   POSIX flavour: mprotect every page the range touches, so the
   bounds are first expanded to host-page granularity. */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    /* Round the start down and the end up to page boundaries. */
    unsigned long first = (unsigned long)addr & ~(page_size - 1);
    unsigned long last = ((unsigned long)addr + size + page_size - 1)
                         & ~(page_size - 1);

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
253 b346ff46 bellard
/* Initialize the host page geometry (qemu_host_page_*) and, for BSD
   user-mode emulation, mark every address range already mapped by the
   host process as PAGE_RESERVED so the guest cannot reuse it. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        /* Query the host page size from the OS. */
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been set earlier; default it to the
       real host page size and clamp it to at least a target page. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* Derive log2(qemu_host_page_size) and the corresponding mask. */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 7: walk the process memory map via kinfo_getvmmap
           and reserve each mapped range in the guest view. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* Range runs past the guest address space: if the
                           guest space is at least as wide as the ABI,
                           reserve up to the top of it. */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Older BSDs: parse the linprocfs-style maps file instead.
           NOTE(review): this path presumably relies on linprocfs being
           mounted at /compat/linux -- silently does nothing otherwise. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                /* Each line starts with "start-end ..." in hex. */
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
341 54936004 bellard
342 41c1b1c9 Paul Brook
/* Return the PageDesc for guest page 'index', descending the
   multi-level l1_map radix tree.  If 'alloc' is non-zero, missing
   intermediate tables and the leaf PageDesc array are created on the
   way down; otherwise a missing level yields NULL. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    void **slot;
    PageDesc *leaf;
    int level;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Root level: the statically allocated L1 table. */
    slot = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Intermediate levels: allocate pointer tables on demand. */
    for (level = V_L1_SHIFT / L2_BITS - 1; level > 0; level--) {
        void **table = *slot;

        if (table == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(table, sizeof(void *) * L2_SIZE);
            *slot = table;
        }
        slot = table + ((index >> (level * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Leaf level: an array of L2_SIZE PageDesc entries. */
    leaf = *slot;
    if (leaf == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(leaf, sizeof(PageDesc) * L2_SIZE);
        *slot = leaf;
    }

#undef ALLOC

    return leaf + (index & (L2_SIZE - 1));
}
391 54936004 bellard
392 41c1b1c9 Paul Brook
/* Read-only lookup of the PageDesc for guest page 'index';
   returns NULL instead of allocating missing table levels. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    PageDesc *pd = page_find_alloc(index, 0);
    return pd;
}
396 fd6ce8f6 bellard
397 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
398 c227f099 Anthony Liguori
/* Return the PhysPageDesc for guest physical page 'index', walking the
   multi-level l1_phys_map radix tree.  If 'alloc' is non-zero, missing
   intermediate tables and the leaf array are allocated on demand;
   otherwise a missing level yields NULL.
   Fix: the leaf-initialization loop used to re-declare 'int i', shadowing
   the outer loop variable (-Wshadow); the outer variable is reused. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1: pointer tables, zero-allocated on demand.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        /* New leaf: every page starts unassigned, with region_offset
           preset to its own physical address.
           NOTE(review): region_offset is seeded from the 'index' that
           triggered allocation rather than the L2 block base -- verify
           that callers always overwrite it on registration. */
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
437 92e873b9 bellard
438 c227f099 Anthony Liguori
/* Read-only lookup of the PhysPageDesc for physical page 'index';
   returns NULL instead of allocating missing table levels. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);
    return p;
}
442 92e873b9 bellard
443 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
444 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
445 3a7d929e bellard
                                    target_ulong vaddr);
446 c8a706fe pbrook
#define mmap_lock() do { } while(0)
447 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
448 9fa3e853 bellard
#endif
449 fd6ce8f6 bellard
450 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
451 4369415f bellard
452 4369415f bellard
#if defined(CONFIG_USER_ONLY)
453 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
454 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
455 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
456 4369415f bellard
#endif
457 4369415f bellard
458 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
459 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
460 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
461 4369415f bellard
#endif
462 4369415f bellard
463 8fcd3692 blueswir1
/* Allocate the buffer that will hold the generated host code and the
   array of TranslationBlock descriptors.  'tb_size' is the requested
   buffer size in bytes (0 selects a default).  On hosts with limited
   branch ranges the buffer is placed at a fixed low address so the
   prologue and TBs can reach each other with direct jumps. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User-mode build: use the statically allocated buffer and just
       make it executable; 'tb_size' is ignored here. */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Keep the buffer in the low 2GB so 32-bit displacements work. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Other hosts: plain allocation, then mark it executable. */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Leave room for the largest possible TB at the end of the buffer. */
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
564 26a5f13b bellard
565 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    /* Start emitting translated code at the base of the fresh buffer. */
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    /* System emulation only: set up the io memory dispatch tables. */
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
583 26a5f13b bellard
584 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
585 9656f324 pbrook
586 e59fb374 Juan Quintela
static int cpu_common_post_load(void *opaque, int version_id)
587 e7f4eff7 Juan Quintela
{
588 e7f4eff7 Juan Quintela
    CPUState *env = opaque;
589 9656f324 pbrook
590 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
591 3098dba0 aurel32
       version_id is increased. */
592 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
593 9656f324 pbrook
    tlb_flush(env, 1);
594 9656f324 pbrook
595 9656f324 pbrook
    return 0;
596 9656f324 pbrook
}
597 e7f4eff7 Juan Quintela
598 e7f4eff7 Juan Quintela
/* Migration description for the target-independent CPU fields; the
   per-target register state is registered separately via
   register_savevm() in cpu_exec_init(). */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
610 9656f324 pbrook
#endif
611 9656f324 pbrook
612 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
613 950f1472 Glauber Costa
{
614 950f1472 Glauber Costa
    CPUState *env = first_cpu;
615 950f1472 Glauber Costa
616 950f1472 Glauber Costa
    while (env) {
617 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
618 950f1472 Glauber Costa
            break;
619 950f1472 Glauber Costa
        env = env->next_cpu;
620 950f1472 Glauber Costa
    }
621 950f1472 Glauber Costa
622 950f1472 Glauber Costa
    return env;
623 950f1472 Glauber Costa
}
624 950f1472 Glauber Costa
625 6a00d601 bellard
/* Register a new virtual CPU: append it to the global first_cpu list,
   give it the next free cpu_index, and hook it into vmstate/savevm so
   its state is saved and migrated.  Takes the CPU list lock in
   user-mode builds, where guest threads may create CPUs concurrently. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    /* Walk to the tail of the list, counting the existing CPUs so that
       cpu_index becomes one past the highest index in use. */
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Register both the common CPU fields (vmstate_cpu_common) and the
       target-specific state (cpu_save/cpu_load) for migration. */
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
657 fd6ce8f6 bellard
658 d1a1eb74 Tristan Gingold
/* Allocate a new translation block. Flush the translation buffer if
659 d1a1eb74 Tristan Gingold
   too many translation blocks or too much generated code. */
660 d1a1eb74 Tristan Gingold
static TranslationBlock *tb_alloc(target_ulong pc)
661 d1a1eb74 Tristan Gingold
{
662 d1a1eb74 Tristan Gingold
    TranslationBlock *tb;
663 d1a1eb74 Tristan Gingold
664 d1a1eb74 Tristan Gingold
    if (nb_tbs >= code_gen_max_blocks ||
665 d1a1eb74 Tristan Gingold
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
666 d1a1eb74 Tristan Gingold
        return NULL;
667 d1a1eb74 Tristan Gingold
    tb = &tbs[nb_tbs++];
668 d1a1eb74 Tristan Gingold
    tb->pc = pc;
669 d1a1eb74 Tristan Gingold
    tb->cflags = 0;
670 d1a1eb74 Tristan Gingold
    return tb;
671 d1a1eb74 Tristan Gingold
}
672 d1a1eb74 Tristan Gingold
673 d1a1eb74 Tristan Gingold
/* Release a TB.  Only the most recently allocated TB can truly be
   reclaimed (by rolling back the code generation pointer); any other
   TB is simply left in place until the next tb_flush(). */
void tb_free(TranslationBlock *tb)
{
    if (nb_tbs == 0 || tb != &tbs[nb_tbs - 1]) {
        return;
    }
    nb_tbs--;
    code_gen_ptr = tb->tc_ptr;
}
683 d1a1eb74 Tristan Gingold
684 9fa3e853 bellard
/* Drop the SMC code bitmap of a page and reset its write counter so
   the bitmap is only rebuilt after enough further code writes. */
static inline void invalidate_page_bitmap(PageDesc *page)
{
    if (page->code_bitmap != NULL) {
        qemu_free(page->code_bitmap);
        page->code_bitmap = NULL;
    }
    page->code_write_count = 0;
}
692 9fa3e853 bellard
693 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
694 5cd2c5b6 Richard Henderson
695 5cd2c5b6 Richard Henderson
static void page_flush_tb_1 (int level, void **lp)
696 fd6ce8f6 bellard
{
697 5cd2c5b6 Richard Henderson
    int i;
698 fd6ce8f6 bellard
699 5cd2c5b6 Richard Henderson
    if (*lp == NULL) {
700 5cd2c5b6 Richard Henderson
        return;
701 5cd2c5b6 Richard Henderson
    }
702 5cd2c5b6 Richard Henderson
    if (level == 0) {
703 5cd2c5b6 Richard Henderson
        PageDesc *pd = *lp;
704 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
705 5cd2c5b6 Richard Henderson
            pd[i].first_tb = NULL;
706 5cd2c5b6 Richard Henderson
            invalidate_page_bitmap(pd + i);
707 fd6ce8f6 bellard
        }
708 5cd2c5b6 Richard Henderson
    } else {
709 5cd2c5b6 Richard Henderson
        void **pp = *lp;
710 7296abac Paul Brook
        for (i = 0; i < L2_SIZE; ++i) {
711 5cd2c5b6 Richard Henderson
            page_flush_tb_1 (level - 1, pp + i);
712 5cd2c5b6 Richard Henderson
        }
713 5cd2c5b6 Richard Henderson
    }
714 5cd2c5b6 Richard Henderson
}
715 5cd2c5b6 Richard Henderson
716 5cd2c5b6 Richard Henderson
static void page_flush_tb(void)
717 5cd2c5b6 Richard Henderson
{
718 5cd2c5b6 Richard Henderson
    int i;
719 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
720 5cd2c5b6 Richard Henderson
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
721 fd6ce8f6 bellard
    }
722 fd6ce8f6 bellard
}
723 fd6ce8f6 bellard
724 fd6ce8f6 bellard
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* The generated-code pointer must never pass the end of the buffer;
       if it has, memory is already corrupted. */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* Clear every CPU's jump cache: its entries point at TBs that are
       about to be discarded. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* All TBs gone: restart code generation from the buffer start. */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
752 fd6ce8f6 bellard
753 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
754 fd6ce8f6 bellard
755 bc98a7ef j_mayer
/* Debug check: report any TB that still overlaps the page containing
   'address' after an invalidation. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* Complain when [address, address + page) intersects the TB. */
            if (address + TARGET_PAGE_SIZE > tb->pc &&
                address < tb->pc + tb->size) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
771 fd6ce8f6 bellard
772 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
773 fd6ce8f6 bellard
static void tb_page_check(void)
774 fd6ce8f6 bellard
{
775 fd6ce8f6 bellard
    TranslationBlock *tb;
776 fd6ce8f6 bellard
    int i, flags1, flags2;
777 3b46e624 ths
778 99773bd4 pbrook
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
779 99773bd4 pbrook
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
780 fd6ce8f6 bellard
            flags1 = page_get_flags(tb->pc);
781 fd6ce8f6 bellard
            flags2 = page_get_flags(tb->pc + tb->size - 1);
782 fd6ce8f6 bellard
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
783 fd6ce8f6 bellard
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
784 99773bd4 pbrook
                       (long)tb->pc, tb->size, flags1, flags2);
785 fd6ce8f6 bellard
            }
786 fd6ce8f6 bellard
        }
787 fd6ce8f6 bellard
    }
788 fd6ce8f6 bellard
}
789 fd6ce8f6 bellard
790 fd6ce8f6 bellard
#endif
791 fd6ce8f6 bellard
792 fd6ce8f6 bellard
/* invalidate one TB */
793 fd6ce8f6 bellard
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
794 fd6ce8f6 bellard
                             int next_offset)
795 fd6ce8f6 bellard
{
796 fd6ce8f6 bellard
    TranslationBlock *tb1;
797 fd6ce8f6 bellard
    for(;;) {
798 fd6ce8f6 bellard
        tb1 = *ptb;
799 fd6ce8f6 bellard
        if (tb1 == tb) {
800 fd6ce8f6 bellard
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
801 fd6ce8f6 bellard
            break;
802 fd6ce8f6 bellard
        }
803 fd6ce8f6 bellard
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
804 fd6ce8f6 bellard
    }
805 fd6ce8f6 bellard
}
806 fd6ce8f6 bellard
807 9fa3e853 bellard
/* Unlink 'tb' from a page's TB list.  The list pointers carry the page
   slot number (0 or 1) in their low two bits, so every hop must strip
   the tag before dereferencing. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    for (;;) {
        TranslationBlock *tagged = *ptb;
        unsigned int slot = (long)tagged & 3;
        TranslationBlock *cur = (TranslationBlock *)((long)tagged & ~3);

        if (cur == tb) {
            *ptb = cur->page_next[slot];
            return;
        }
        ptb = &cur->page_next[slot];
    }
}
823 9fa3e853 bellard
824 d4e8164f bellard
/* Remove jump slot 'n' of 'tb' from the circular list of TBs jumping
   to the same destination.  List pointers are tagged in their low two
   bits with the jump slot number; tag value 2 marks the list head
   (the destination's jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* Reached the list head: continue from jmp_first. */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
851 d4e8164f bellard
852 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
853 d4e8164f bellard
   another TB */
854 d4e8164f bellard
static inline void tb_reset_jump(TranslationBlock *tb, int n)
855 d4e8164f bellard
{
856 d4e8164f bellard
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
857 d4e8164f bellard
}
858 d4e8164f bellard
859 41c1b1c9 Paul Brook
/* Remove a TB from every structure that references it: the physical
   hash table, the per-page TB lists, the per-CPU jump caches, and the
   jump chains of other TBs.  'page_addr' is the page whose list the
   caller is already handling (skipped here), or -1 for none. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
914 9fa3e853 bellard
915 9fa3e853 bellard
/* Set bits [start, start + len) in the bit array 'tab', one bit per
   byte position, LSB-first within each byte. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    int end = start + len;

    for (bit = start; bit < end; bit++) {
        tab[bit >> 3] |= 1 << (bit & 7);
    }
}
941 9fa3e853 bellard
942 9fa3e853 bellard
/* Build the bitmap of page bytes covered by generated code; used by
   tb_invalidate_phys_page_fast() to skip writes that do not touch any
   translated code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* One bit per byte of the target page. */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* Low two bits of the list pointer encode which of the TB's
           pages (0 or 1) this list entry refers to. */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* Second page of the TB: covered from the page start up to
               where the TB ends. */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
969 9fa3e853 bellard
970 2e70f6ef pbrook
/* Translate guest code at pc/cs_base with the given flags and cflags
   into a new TB.  If the TB cache is full, flush it and retry; the
   retry cannot fail because the cache is then empty. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* Advance the generation pointer past the emitted code, rounded up
       to CODE_GEN_ALIGN. */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
1007 3b46e624 ths
1008 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* Once the page has seen enough code writes, build the code bitmap
       so tb_invalidate_phys_page_fast() can filter future writes. */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* Low two bits of the list pointer select the TB's page slot. */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                /* Resolve the executing TB lazily, only on the first
                   overlapping TB. */
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1117 fd6ce8f6 bellard
1118 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for code writes: consult the page's code bitmap (when
   present) and fall back to a full range invalidation only if the
   written bytes actually overlap translated code. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
        /* No bitmap yet: be conservative and always invalidate. */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1144 9fa3e853 bellard
1145 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode only).
   'pc'/'puc' describe the faulting write so that, with precise SMC,
   execution can be restarted cleanly if the current TB modified
   itself. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* Low two bits of the list pointer select the TB's page slot. */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
1205 fd6ce8f6 bellard
1206 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    /* link the TB at the head of the page's TB list; the low bits of the
       stored pointer encode which of the TB's two pages (n = 0 or 1)
       this link belongs to */
    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;    /* old head, sampled before overwrite */
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may span several target pages: accumulate the
           flags of all of them and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1259 fd6ce8f6 bellard
1260 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1261 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1262 41c1b1c9 Paul Brook
void tb_link_page(TranslationBlock *tb,
1263 41c1b1c9 Paul Brook
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1264 d4e8164f bellard
{
1265 9fa3e853 bellard
    unsigned int h;
1266 9fa3e853 bellard
    TranslationBlock **ptb;
1267 9fa3e853 bellard
1268 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1269 c8a706fe pbrook
       before we are done.  */
1270 c8a706fe pbrook
    mmap_lock();
1271 9fa3e853 bellard
    /* add in the physical hash table */
1272 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1273 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1274 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1275 9fa3e853 bellard
    *ptb = tb;
1276 fd6ce8f6 bellard
1277 fd6ce8f6 bellard
    /* add in the page list */
1278 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1279 9fa3e853 bellard
    if (phys_page2 != -1)
1280 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1281 9fa3e853 bellard
    else
1282 9fa3e853 bellard
        tb->page_addr[1] = -1;
1283 9fa3e853 bellard
1284 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1285 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1286 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1287 d4e8164f bellard
1288 d4e8164f bellard
    /* init original jump addresses */
1289 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1290 d4e8164f bellard
        tb_reset_jump(tb, 0);
1291 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1292 d4e8164f bellard
        tb_reset_jump(tb, 1);
1293 8a40a180 bellard
1294 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1295 8a40a180 bellard
    tb_page_check();
1296 8a40a180 bellard
#endif
1297 c8a706fe pbrook
    mmap_unlock();
1298 fd6ce8f6 bellard
}
1299 fd6ce8f6 bellard
1300 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1301 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1302 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1303 fd6ce8f6 bellard
{
1304 9fa3e853 bellard
    int m_min, m_max, m;
1305 9fa3e853 bellard
    unsigned long v;
1306 9fa3e853 bellard
    TranslationBlock *tb;
1307 a513fe19 bellard
1308 a513fe19 bellard
    if (nb_tbs <= 0)
1309 a513fe19 bellard
        return NULL;
1310 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1311 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1312 a513fe19 bellard
        return NULL;
1313 a513fe19 bellard
    /* binary search (cf Knuth) */
1314 a513fe19 bellard
    m_min = 0;
1315 a513fe19 bellard
    m_max = nb_tbs - 1;
1316 a513fe19 bellard
    while (m_min <= m_max) {
1317 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1318 a513fe19 bellard
        tb = &tbs[m];
1319 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1320 a513fe19 bellard
        if (v == tc_ptr)
1321 a513fe19 bellard
            return tb;
1322 a513fe19 bellard
        else if (tc_ptr < v) {
1323 a513fe19 bellard
            m_max = m - 1;
1324 a513fe19 bellard
        } else {
1325 a513fe19 bellard
            m_min = m + 1;
1326 a513fe19 bellard
        }
1327 5fafdf24 ths
    }
1328 a513fe19 bellard
    return &tbs[m_max];
1329 a513fe19 bellard
}
1330 7501267e bellard
1331 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Unlink outgoing jump 'n' of 'tb' from the target TB's incoming-jump
   list and recursively reset jumps into that target.  List pointers carry
   a tag in their low 2 bits: 0/1 select which jmp_next[] slot the link
   lives in, and 2 marks the head of the circular list. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1371 ea041c0e bellard
1372 ea041c0e bellard
/* Reset both outgoing jumps of 'tb' (and, recursively, their targets). */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    int n;

    for (n = 0; n < 2; n++) {
        tb_reset_jump_recursive2(tb, n);
    }
}
1377 ea041c0e bellard
1378 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1379 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1380 94df27fd Paul Brook
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    /* user mode: invalidate any translated code covering the byte at pc */
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1384 94df27fd Paul Brook
#else
1385 d720b93d bellard
/* Invalidate the translated code covering the byte at virtual pc. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    /* translate pc to a ram address before invalidating */
    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
1402 c27004ec bellard
#endif
1403 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1404 d720b93d bellard
1405 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
1406 c527ee8f Paul Brook
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
    /* watchpoints are not supported in user-mode emulation: no-op */
}
1410 c527ee8f Paul Brook
1411 c527ee8f Paul Brook
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    /* watchpoints are not supported in user-mode emulation */
    return -ENOSYS;
}
1416 c527ee8f Paul Brook
#else
1417 6658ffb8 pbrook
/* Add a watchpoint.  */
1418 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1419 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1420 6658ffb8 pbrook
{
1421 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1422 c0ce998e aliguori
    CPUWatchpoint *wp;
1423 6658ffb8 pbrook
1424 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1425 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1426 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1427 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1428 b4051334 aliguori
        return -EINVAL;
1429 b4051334 aliguori
    }
1430 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1431 a1d1bb31 aliguori
1432 a1d1bb31 aliguori
    wp->vaddr = addr;
1433 b4051334 aliguori
    wp->len_mask = len_mask;
1434 a1d1bb31 aliguori
    wp->flags = flags;
1435 a1d1bb31 aliguori
1436 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1437 c0ce998e aliguori
    if (flags & BP_GDB)
1438 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1439 c0ce998e aliguori
    else
1440 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1441 6658ffb8 pbrook
1442 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1443 a1d1bb31 aliguori
1444 a1d1bb31 aliguori
    if (watchpoint)
1445 a1d1bb31 aliguori
        *watchpoint = wp;
1446 a1d1bb31 aliguori
    return 0;
1447 6658ffb8 pbrook
}
1448 6658ffb8 pbrook
1449 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1450 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1451 a1d1bb31 aliguori
                          int flags)
1452 6658ffb8 pbrook
{
1453 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1454 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1455 6658ffb8 pbrook
1456 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1457 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1458 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1459 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1460 6658ffb8 pbrook
            return 0;
1461 6658ffb8 pbrook
        }
1462 6658ffb8 pbrook
    }
1463 a1d1bb31 aliguori
    return -ENOENT;
1464 6658ffb8 pbrook
}
1465 6658ffb8 pbrook
1466 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    /* flush before freeing: vaddr is still read from the watchpoint */
    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
1475 a1d1bb31 aliguori
1476 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1477 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1478 a1d1bb31 aliguori
{
1479 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1480 a1d1bb31 aliguori
1481 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1482 a1d1bb31 aliguori
        if (wp->flags & mask)
1483 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1484 c0ce998e aliguori
    }
1485 7d03f82f edgar_igl
}
1486 c527ee8f Paul Brook
#endif
1487 7d03f82f edgar_igl
1488 a1d1bb31 aliguori
/* Add a breakpoint.  */
1489 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1490 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1491 4c3a88a2 bellard
{
1492 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1493 c0ce998e aliguori
    CPUBreakpoint *bp;
1494 3b46e624 ths
1495 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1496 4c3a88a2 bellard
1497 a1d1bb31 aliguori
    bp->pc = pc;
1498 a1d1bb31 aliguori
    bp->flags = flags;
1499 a1d1bb31 aliguori
1500 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1501 c0ce998e aliguori
    if (flags & BP_GDB)
1502 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1503 c0ce998e aliguori
    else
1504 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1505 3b46e624 ths
1506 d720b93d bellard
    breakpoint_invalidate(env, pc);
1507 a1d1bb31 aliguori
1508 a1d1bb31 aliguori
    if (breakpoint)
1509 a1d1bb31 aliguori
        *breakpoint = bp;
1510 4c3a88a2 bellard
    return 0;
1511 4c3a88a2 bellard
#else
1512 a1d1bb31 aliguori
    return -ENOSYS;
1513 4c3a88a2 bellard
#endif
1514 4c3a88a2 bellard
}
1515 4c3a88a2 bellard
1516 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1517 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1518 a1d1bb31 aliguori
{
1519 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1520 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1521 a1d1bb31 aliguori
1522 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1523 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1524 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1525 a1d1bb31 aliguori
            return 0;
1526 a1d1bb31 aliguori
        }
1527 7d03f82f edgar_igl
    }
1528 a1d1bb31 aliguori
    return -ENOENT;
1529 a1d1bb31 aliguori
#else
1530 a1d1bb31 aliguori
    return -ENOSYS;
1531 7d03f82f edgar_igl
#endif
1532 7d03f82f edgar_igl
}
1533 7d03f82f edgar_igl
1534 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    /* invalidate before freeing: pc is still read from the breakpoint */
    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1545 a1d1bb31 aliguori
1546 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1547 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1548 a1d1bb31 aliguori
{
1549 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1550 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1551 a1d1bb31 aliguori
1552 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1553 a1d1bb31 aliguori
        if (bp->flags & mask)
1554 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1555 c0ce998e aliguori
    }
1556 4c3a88a2 bellard
#endif
1557 4c3a88a2 bellard
}
1558 4c3a88a2 bellard
1559 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1560 c33a346e bellard
   CPU loop after each instruction */
1561 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1562 c33a346e bellard
{
1563 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1564 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1565 c33a346e bellard
        env->singlestep_enabled = enabled;
1566 e22a25c9 aliguori
        if (kvm_enabled())
1567 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1568 e22a25c9 aliguori
        else {
1569 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1570 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1571 e22a25c9 aliguori
            tb_flush(env);
1572 e22a25c9 aliguori
        }
1573 c33a346e bellard
    }
1574 c33a346e bellard
#endif
1575 c33a346e bellard
}
1576 c33a346e bellard
1577 34865134 bellard
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* open the log file lazily, on the first call that enables logging */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* later re-opens append instead of truncating */
        log_append = 1;
    }
    /* disabling all logging closes the file */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1604 34865134 bellard
1605 34865134 bellard
void cpu_set_log_filename(const char *filename)
{
    /* NOTE(review): the previous logfilename is not freed here (it may
       point to a string literal default), so repeated calls leak the old
       strdup'd copy -- confirm before changing */
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    /* reopen under the new name if logging is currently enabled */
    cpu_set_log(loglevel);
}
1614 c33a346e bellard
1615 3098dba0 aurel32
/* Break the chaining of the TB the CPU is currently executing (if any)
   so that control returns to the execution loop between TBs. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1634 3098dba0 aurel32
1635 97ffbd8d Jan Kiszka
#ifndef CONFIG_USER_ONLY
1636 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;  /* kept to detect newly-raised bits */
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        /* NOTE(review): presumably forces the instruction counter to
           expire so the request is noticed promptly -- confirm */
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}
1663 ea041c0e bellard
1664 ec6959d0 Jan Kiszka
/* dispatch hook for cpu_interrupt(); defaults to the TCG handler above */
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1665 ec6959d0 Jan Kiszka
1666 97ffbd8d Jan Kiszka
#else /* CONFIG_USER_ONLY */
1667 97ffbd8d Jan Kiszka
1668 97ffbd8d Jan Kiszka
void cpu_interrupt(CPUState *env, int mask)
{
    /* user-mode emulation: record the pending request and unchain the
       current TB so it is noticed */
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
1673 97ffbd8d Jan Kiszka
#endif /* CONFIG_USER_ONLY */
1674 97ffbd8d Jan Kiszka
1675 b54ad049 bellard
/* clear the given bits from the CPU's pending interrupt mask */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1679 b54ad049 bellard
1680 3098dba0 aurel32
void cpu_exit(CPUState *env)
{
    /* raise the exit flag and unchain the current TB so the execution
       loop sees the request promptly */
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1685 3098dba0 aurel32
1686 c7cd6a37 blueswir1
/* table of recognized log items, terminated by a zero-mask sentinel;
   the middle field is the token accepted by cpu_str_to_log_mask() */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1717 f193c797 bellard
1718 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1719 f6f3fbca Michael S. Tsirkin
/* registered CPUPhysMemoryClient callbacks, notified on memory changes */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

/* notify every registered client of a physical memory mapping change */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}
1732 f6f3fbca Michael S. Tsirkin
1733 f6f3fbca Michael S. Tsirkin
/* ask every registered client to sync its dirty bitmap for [start, end);
   stops and returns the first negative client result, else 0 */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    int ret;

    QLIST_FOREACH(client, &memory_client_list, list) {
        ret = client->sync_dirty_bitmap(client, start, end);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}
1744 f6f3fbca Michael S. Tsirkin
1745 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1746 f6f3fbca Michael S. Tsirkin
{
1747 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1748 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1749 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1750 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1751 f6f3fbca Michael S. Tsirkin
            return r;
1752 f6f3fbca Michael S. Tsirkin
    }
1753 f6f3fbca Michael S. Tsirkin
    return 0;
1754 f6f3fbca Michael S. Tsirkin
}
1755 f6f3fbca Michael S. Tsirkin
1756 8d4c78e7 Alex Williamson
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp, target_phys_addr_t addr)
{
    int i;

    if (*lp == NULL) {
        /* unpopulated subtree: nothing mapped here */
        return;
    }
    if (level == 0) {
        /* leaf level: a table of PhysPageDesc entries */
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, addr | i << TARGET_PAGE_BITS,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset, false);
            }
        }
    } else {
        /* interior node: recurse, accumulating address bits on the way */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i);
        }
    }
}
1787 f6f3fbca Michael S. Tsirkin
1788 f6f3fbca Michael S. Tsirkin
/* replay the entire physical page table to a single client */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int idx;

    for (idx = 0; idx < P_L1_SIZE; ++idx) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + idx, idx);
    }
}
1796 f6f3fbca Michael S. Tsirkin
1797 f6f3fbca Michael S. Tsirkin
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    /* add the client, then replay the current memory map to it so it
       starts with a complete view */
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1802 f6f3fbca Michael S. Tsirkin
1803 f6f3fbca Michael S. Tsirkin
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    /* stop delivering notifications; the client itself is not freed */
    QLIST_REMOVE(client, list);
}
1807 f6f3fbca Michael S. Tsirkin
#endif
1808 f6f3fbca Michael S. Tsirkin
1809 f193c797 bellard
/* return 1 iff s2 has length n and matches the first n chars of s1 */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == n && memcmp(s1, s2, n) == 0;
}
1815 3b46e624 ths
1816 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* [p, p1) is the current comma-separated token */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            /* "all" selects every known log item */
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            /* unknown token: signal error to the caller */
            return 0;
        }
    found:
        /* NOTE: after the "all" branch, item points at the {0,NULL,NULL}
           terminator, so this OR is a harmless no-op in that case */
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
1848 ea041c0e bellard
1849 7501267e bellard
/* Report a fatal emulation error: print the printf-style message and a
   CPU state dump to stderr (and to the qemu log when enabled), then
   abort().  This function never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* 'ap' is consumed by the stderr vfprintf below; keep a copy for
       the optional log output. */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        /* Make sure everything reaches the log file before we die. */
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT disposition so that a handler
           installed by the guest cannot intercept the abort() below. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1888 7501267e bellard
1889 c5be9f08 ths
/* Create a copy of 'env': allocate a fresh CPUState, duplicate the
   register state, preserve the new CPU's chaining/index, and clone all
   break/watchpoints from the source CPU.  Returns the new CPUState. */
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    /* BUG FIX: initialize the *new* CPU's list heads, not the source's.
       The memcpy above copied env's list heads into new_env, so new_env
       would otherwise alias env's entries; and resetting env's heads
       here made the QTAILQ_FOREACH loops below walk empty lists, so
       breakpoints/watchpoints were never actually cloned. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* wp->len_mask stores an inverted length mask; recover the
           original byte length for the insert call. */
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1922 c5be9f08 ths
1923 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1924 0124311e bellard
1925 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page: clear the hash buckets for both the
       page itself and the page preceding it. */
    target_ulong pages[2] = { addr - TARGET_PAGE_SIZE, addr };
    int k;

    for (k = 0; k < 2; k++) {
        unsigned int idx = tb_jmp_cache_hash_page(pages[k]);
        memset(&env->tb_jmp_cache[idx], 0,
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    }
}
1939 5c751e99 edgar_igl
1940 08738984 Igor Kovalenko
/* Template for an invalidated TLB entry: every address-comparison field
   is -1 so no page-aligned guest address can ever match it, and the
   addend is poisoned as well. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1946 08738984 Igor Kovalenko
1947 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1948 ee8b7021 bellard
   implemented yet) */
1949 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1950 33417e70 bellard
{
1951 33417e70 bellard
    int i;
1952 0124311e bellard
1953 9fa3e853 bellard
#if defined(DEBUG_TLB)
1954 9fa3e853 bellard
    printf("tlb_flush:\n");
1955 9fa3e853 bellard
#endif
1956 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1957 0124311e bellard
       links while we are modifying them */
1958 0124311e bellard
    env->current_tb = NULL;
1959 0124311e bellard
1960 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1961 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1962 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1963 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1964 cfde4bd9 Isaku Yamahata
        }
1965 33417e70 bellard
    }
1966 9fa3e853 bellard
1967 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1968 9fa3e853 bellard
1969 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1970 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1971 e3db7226 bellard
    tlb_flush_count++;
1972 33417e70 bellard
}
1973 33417e70 bellard
1974 274da6b2 bellard
/* Invalidate 'tlb_entry' if any of its read/write/code addresses maps
   the page at 'addr' (addr is expected to be page-aligned). */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    target_ulong mask = TARGET_PAGE_MASK | TLB_INVALID_MASK;

    if (addr == (tlb_entry->addr_read & mask) ||
        addr == (tlb_entry->addr_write & mask) ||
        addr == (tlb_entry->addr_code & mask)) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1985 61382a50 bellard
1986 2e12669a bellard
/* Flush the TLB entry (in every MMU mode) that maps virtual page 'addr',
   and drop any cached jump targets for that page. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        /* The TLB only holds TARGET_PAGE_SIZE entries, so a page inside
           a tracked large page can only be flushed by a full flush. */
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
2015 9fa3e853 bellard
2016 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG forces subsequent writes to this page
       through the slow path, where translated code can be invalidated. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2024 9fa3e853 bellard
2025 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* Re-set CODE_DIRTY_FLAG for the page; 'env' and 'vaddr' are unused
       here (kept for the call-site signature). */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2032 1ccde1cb bellard
2033 5fafdf24 ths
/* If 'tlb_entry' is a writable RAM mapping whose backing host address
   falls inside [start, start+length), mark it TLB_NOTDIRTY so the next
   write takes the slow path and re-dirties the page. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    /* Only plain RAM mappings participate in dirty tracking. */
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
    if ((addr - start) < length) {
        tlb_entry->addr_write =
            (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
    }
}
2044 1ccde1cb bellard
2045 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    /* Clear the requested dirty bits for the whole range first. */
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    /* Walk every TLB entry of every CPU and force writes in the range
       back through the notdirty slow path. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2080 1ccde1cb bellard
2081 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2082 74576198 aliguori
{
2083 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2084 74576198 aliguori
    in_migration = enable;
2085 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2086 f6f3fbca Michael S. Tsirkin
    return ret;
2087 74576198 aliguori
}
2088 74576198 aliguori
2089 74576198 aliguori
/* Return the dirty-tracking state last set by
   cpu_physical_memory_set_dirty_tracking(). */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2093 74576198 aliguori
2094 c227f099 Anthony Liguori
/* Ask all registered memory clients to synchronize their dirty bitmap
   for the physical range [start_addr, end_addr] and propagate the
   notification result to the caller. */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    return cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
}
2102 2bec46dc aliguori
2103 e5896b12 Anthony PERARD
int cpu_physical_log_start(target_phys_addr_t start_addr,
2104 e5896b12 Anthony PERARD
                           ram_addr_t size)
2105 e5896b12 Anthony PERARD
{
2106 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2107 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2108 e5896b12 Anthony PERARD
        if (client->log_start) {
2109 e5896b12 Anthony PERARD
            int r = client->log_start(client, start_addr, size);
2110 e5896b12 Anthony PERARD
            if (r < 0) {
2111 e5896b12 Anthony PERARD
                return r;
2112 e5896b12 Anthony PERARD
            }
2113 e5896b12 Anthony PERARD
        }
2114 e5896b12 Anthony PERARD
    }
2115 e5896b12 Anthony PERARD
    return 0;
2116 e5896b12 Anthony PERARD
}
2117 e5896b12 Anthony PERARD
2118 e5896b12 Anthony PERARD
int cpu_physical_log_stop(target_phys_addr_t start_addr,
2119 e5896b12 Anthony PERARD
                          ram_addr_t size)
2120 e5896b12 Anthony PERARD
{
2121 e5896b12 Anthony PERARD
    CPUPhysMemoryClient *client;
2122 e5896b12 Anthony PERARD
    QLIST_FOREACH(client, &memory_client_list, list) {
2123 e5896b12 Anthony PERARD
        if (client->log_stop) {
2124 e5896b12 Anthony PERARD
            int r = client->log_stop(client, start_addr, size);
2125 e5896b12 Anthony PERARD
            if (r < 0) {
2126 e5896b12 Anthony PERARD
                return r;
2127 e5896b12 Anthony PERARD
            }
2128 e5896b12 Anthony PERARD
        }
2129 e5896b12 Anthony PERARD
    }
2130 e5896b12 Anthony PERARD
    return 0;
2131 e5896b12 Anthony PERARD
}
2132 e5896b12 Anthony PERARD
2133 3a7d929e bellard
/* For a writable RAM-backed TLB entry, re-derive the ram address from
   the host pointer and set TLB_NOTDIRTY when the backing page is no
   longer marked dirty, so the next write takes the slow path. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
                                + tlb_entry->addend);
    ram_addr = qemu_ram_addr_from_host_nofail(p);
    if (!cpu_physical_memory_is_dirty(ram_addr)) {
        tlb_entry->addr_write |= TLB_NOTDIRTY;
    }
}
2147 3a7d929e bellard
2148 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2149 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2150 3a7d929e bellard
{
2151 3a7d929e bellard
    int i;
2152 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2153 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2154 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2155 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2156 cfde4bd9 Isaku Yamahata
    }
2157 3a7d929e bellard
}
2158 3a7d929e bellard
2159 0f459d16 pbrook
/* Clear the NOTDIRTY marker on 'tlb_entry' if it maps exactly the
   (page-aligned) address 'vaddr'. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
2164 1ccde1cb bellard
2165 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2166 0f459d16 pbrook
   so that it is no longer dirty */
2167 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2168 1ccde1cb bellard
{
2169 1ccde1cb bellard
    int i;
2170 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2171 1ccde1cb bellard
2172 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2173 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2174 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2175 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2176 9fa3e853 bellard
}
2177 9fa3e853 bellard
2178 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    /* 'size' is assumed to be a power of two, so ~(size-1) is the
       natural alignment mask for this page. */
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No large page tracked yet: start tracking this one. */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    /* Widen the mask until both the tracked address and 'vaddr' fall
       inside the same aligned region. */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2200 d4c430a8 Paul Brook
2201 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;           /* phys_offset: ram offset | IO index/flags */
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;       /* host address of the page's RAM backing */
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Remember large-page coverage so tlb_flush_page can force a
           full flush when needed. */
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    /* iotlb/addend are stored relative to vaddr so the fast path can
       add the access address directly. */
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: route writes through the notdirty path. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2306 9fa3e853 bellard
2307 0124311e bellard
#else
2308 0124311e bellard
2309 ee8b7021 bellard
/* User-mode build (the #else branch of !CONFIG_USER_ONLY): there is no
   softmmu TLB, so flushing is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2312 0124311e bellard
2313 2e12669a bellard
/* User-mode build: no softmmu TLB, so a per-page flush is a no-op. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2316 0124311e bellard
2317 edf8e2af Mika Westerberg
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator state for the walk: 'start'/'prot' describe the current
   run of pages with identical protection; start == -1ul means no run
   is open. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};
};
2329 5cd2c5b6 Richard Henderson
2330 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2331 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2332 5cd2c5b6 Richard Henderson
{
2333 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2334 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2335 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2336 5cd2c5b6 Richard Henderson
            return rc;
2337 5cd2c5b6 Richard Henderson
        }
2338 5cd2c5b6 Richard Henderson
    }
2339 5cd2c5b6 Richard Henderson
2340 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2341 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2342 5cd2c5b6 Richard Henderson
2343 5cd2c5b6 Richard Henderson
    return 0;
2344 5cd2c5b6 Richard Henderson
}
2345 5cd2c5b6 Richard Henderson
2346 5cd2c5b6 Richard Henderson
/* Recursive helper for walk_memory_regions(): visit one (sub)table of
   the page-table radix tree.  'base' is the guest address covered by
   *lp, 'level' 0 means *lp is a leaf PageDesc array.  Returns the first
   nonzero callback result, or 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Whole subtree unmapped: terminate any open run at 'base'. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: flush the previous run and start
                   a new one at this page. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* Each child at this level covers L2_BITS * level extra
               address bits above TARGET_PAGE_BITS. */
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2383 5cd2c5b6 Richard Henderson
2384 5cd2c5b6 Richard Henderson
/* Walk all mapped guest memory, calling 'fn(priv, start, end, prot)'
   once per maximal run of pages sharing the same protection.  Returns
   the first nonzero callback result, or 0 on completion. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data = {
        .fn = fn,
        .priv = priv,
        .start = -1ul,
        .prot = 0,
    };
    unsigned long slot;

    for (slot = 0; slot < V_L1_SIZE; slot++) {
        int ret = walk_memory_regions_1(&data, (abi_ulong)slot << V_L1_SHIFT,
                                        V_L1_SHIFT / L2_BITS - 1,
                                        l1_map + slot);
        if (ret != 0) {
            return ret;
        }
    }

    /* Flush the final pending region, if one is still open. */
    return walk_memory_regions_end(&data, 0, 0);
}
2404 edf8e2af Mika Westerberg
2405 b480d9b7 Paul Brook
/* walk_memory_regions() callback: print one region to the FILE passed
   as 'priv', in "start-end size rwx" form.  Always returns 0 so the
   walk continues. */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}
2419 edf8e2af Mika Westerberg
2420 edf8e2af Mika Westerberg
/* dump memory mappings */
void page_dump(FILE *f)
{
    /* Header line, then one dump_region() line per protection run. */
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
2427 33417e70 bellard
2428 53a5960a pbrook
/* Return the PAGE_* flags of the page containing 'address', or 0 when
   no descriptor exists for that page. */
int page_get_flags(target_ulong address)
{
    PageDesc *pd = page_find(address >> TARGET_PAGE_BITS);

    return pd ? pd->flags : 0;
}
2437 9fa3e853 bellard
2438 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
2439 376a7909 Richard Henderson
   The flag PAGE_WRITE_ORG is positioned automatically depending
2440 376a7909 Richard Henderson
   on PAGE_WRITE.  The mmap_lock should already be held.  */
2441 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2442 9fa3e853 bellard
{
2443 376a7909 Richard Henderson
    target_ulong addr, len;
2444 376a7909 Richard Henderson
2445 376a7909 Richard Henderson
    /* This function should never be called with addresses outside the
2446 376a7909 Richard Henderson
       guest address space.  If this assert fires, it probably indicates
2447 376a7909 Richard Henderson
       a missing call to h2g_valid.  */
2448 b480d9b7 Paul Brook
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2449 b480d9b7 Paul Brook
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2450 376a7909 Richard Henderson
#endif
2451 376a7909 Richard Henderson
    assert(start < end);
2452 9fa3e853 bellard
2453 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2454 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2455 376a7909 Richard Henderson
2456 376a7909 Richard Henderson
    if (flags & PAGE_WRITE) {
2457 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2458 376a7909 Richard Henderson
    }
2459 376a7909 Richard Henderson
2460 376a7909 Richard Henderson
    for (addr = start, len = end - start;
2461 376a7909 Richard Henderson
         len != 0;
2462 376a7909 Richard Henderson
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2463 376a7909 Richard Henderson
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2464 376a7909 Richard Henderson
2465 376a7909 Richard Henderson
        /* If the write protection bit is set, then we invalidate
2466 376a7909 Richard Henderson
           the code inside.  */
2467 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2468 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2469 9fa3e853 bellard
            p->first_tb) {
2470 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2471 9fa3e853 bellard
        }
2472 9fa3e853 bellard
        p->flags = flags;
2473 9fa3e853 bellard
    }
2474 33417e70 bellard
}
2475 33417e70 bellard
2476 3d97b40b ths
/* Verify that every page in the guest range [start, start+len) grants
   the access rights requested in 'flags' (PAGE_READ and/or PAGE_WRITE).
   Pages that were write-protected because they contain translated code
   are unprotected on demand via page_unprotect().
   Returns 0 on success, -1 if any page fails the check.

   Bug fix: the original code returned 0 from inside the loop as soon as
   the PAGE_WRITE check passed for the FIRST page, so the remaining pages
   of a multi-page range were never validated.  The early return has been
   removed so the whole range is checked. */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* Keep looping: every page in the range must be checked.  */
        }
    }
    return 0;
}
2525 3d97b40b ths
2526 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled (i.e. the
   page was write-protected only because it held translated code and has
   now been made writable again); return 0 otherwise. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        /* No descriptor: the fault is not ours to handle. */
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* A host page may cover several target pages; restore write
           access to all of them and accumulate their flags for the
           final mprotect() call below. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2573 9fa3e853 bellard
2574 6a00d601 bellard
/* User-mode emulation has no softmmu TLB, so marking an address dirty
   is a no-op; this stub keeps the call sites common with system mode. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2578 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2579 9fa3e853 bellard
2580 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2581 8da3ff18 pbrook
2582 c04b2b78 Paul Brook
/* Byte offset of 'addr' within its target page, used to index the
   per-byte tables of a subpage_t. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

/* A subpage splits a single target page into finer-grained I/O routing:
   for every byte offset within the page it records which io-mem handler
   to use and the region offset to pass to it. */
typedef struct subpage_t {
    target_phys_addr_t base;                     /* guest-physical base of the page */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];   /* io-mem index per byte offset */
    ram_addr_t region_offset[TARGET_PAGE_SIZE];  /* region offset per byte offset */
} subpage_t;
2588 c04b2b78 Paul Brook
2589 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2590 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset);
2591 f6405247 Richard Henderson
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2592 f6405247 Richard Henderson
                                ram_addr_t orig_memory,
2593 f6405247 Richard Henderson
                                ram_addr_t region_offset);
2594 db7b5426 blueswir1
/* Compute the sub-page byte range [start_addr2, end_addr2] that the
   registration [start_addr, start_addr + orig_size) covers within the
   target page starting at 'addr', and set 'need_subpage' when the page
   is only partially covered (so a subpage_t must be used).
   NOTE(review): reads 'orig_size' from the caller's scope in addition
   to the named arguments. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2613 db7b5426 blueswir1
2614 8f2498f9 Michael S. Tsirkin
/* register physical memory.
2615 8f2498f9 Michael S. Tsirkin
   For RAM, 'size' must be a multiple of the target page size.
2616 8f2498f9 Michael S. Tsirkin
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2617 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2618 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2619 ccbb4d44 Stuart Brady
   start_addr and region_offset are rounded down to a page boundary
2620 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2621 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
2622 0fd542fb Michael S. Tsirkin
/* Map [start_addr, start_addr+size) of the guest physical address space
   to 'phys_offset' (a ram offset or an io-mem index), page by page.
   Pages only partially covered by the registration are demoted to
   subpages so the remainder keeps its previous routing.  See the
   comment above for the region_offset semantics. */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset,
                                         bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;  /* unrounded size, read by CHECK_SUBPAGE */
    subpage_t *subpage;

    assert(size);
    /* Tell memory clients (e.g. KVM) before the page tables change. */
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    /* do/while so that a registration wrapping the end of the address
       space (addr overflowing back to end_addr) still executes once. */
    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to merge old and new
               contents into a subpage. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    /* Promote the existing full-page mapping to a subpage. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage; reuse its state. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* For RAM/ROM, consecutive pages map consecutive offsets. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Page was unassigned: allocate a descriptor for it. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    /* Partial page over unassigned memory: the uncovered
                       part stays IO_MEM_UNASSIGNED inside the subpage. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2707 33417e70 bellard
2708 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2709 c227f099 Anthony Liguori
/* Return the phys_offset recorded for the page containing 'addr', or
   IO_MEM_UNASSIGNED when the page has no descriptor. */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *desc = phys_page_find(addr >> TARGET_PAGE_BITS);

    return desc ? desc->phys_offset : IO_MEM_UNASSIGNED;
}
2718 ba863458 bellard
2719 c227f099 Anthony Liguori
/* Mark [addr, addr+size) as a coalesced-MMIO region.  This is a
   KVM-only optimisation; without KVM nothing needs to be done. */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_coalesce_mmio_region(addr, size);
    }
}
2724 f65ed4c1 aliguori
2725 c227f099 Anthony Liguori
/* Undo qemu_register_coalesced_mmio() for [addr, addr+size).
   No-op unless KVM is in use. */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_uncoalesce_mmio_region(addr, size);
    }
}
2730 f65ed4c1 aliguori
2731 62a2744c Sheng Yang
/* Drain any buffered coalesced-MMIO writes.  No-op unless KVM is in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
2736 62a2744c Sheng Yang
2737 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2738 c902760f Marcelo Tosatti
2739 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2740 c902760f Marcelo Tosatti
2741 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
2742 c902760f Marcelo Tosatti
2743 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
2744 c902760f Marcelo Tosatti
{
2745 c902760f Marcelo Tosatti
    struct statfs fs;
2746 c902760f Marcelo Tosatti
    int ret;
2747 c902760f Marcelo Tosatti
2748 c902760f Marcelo Tosatti
    do {
2749 9742bf26 Yoshiaki Tamura
        ret = statfs(path, &fs);
2750 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);
2751 c902760f Marcelo Tosatti
2752 c902760f Marcelo Tosatti
    if (ret != 0) {
2753 9742bf26 Yoshiaki Tamura
        perror(path);
2754 9742bf26 Yoshiaki Tamura
        return 0;
2755 c902760f Marcelo Tosatti
    }
2756 c902760f Marcelo Tosatti
2757 c902760f Marcelo Tosatti
    if (fs.f_type != HUGETLBFS_MAGIC)
2758 9742bf26 Yoshiaki Tamura
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2759 c902760f Marcelo Tosatti
2760 c902760f Marcelo Tosatti
    return fs.f_bsize;
2761 c902760f Marcelo Tosatti
}
2762 c902760f Marcelo Tosatti
2763 04b16653 Alex Williamson
/* Allocate 'memory' bytes of guest RAM backed by a (huge-page) file
   created under 'path', mmap it, store the file descriptor in
   block->fd and return the mapped area.  Returns NULL on any failure
   (path not usable, size smaller than one huge page, KVM without MMU
   notifiers, or file/mmap errors), in which case the caller falls back
   to an anonymous allocation. */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Anything smaller than one huge page is not worth backing here. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the file lives only as long as the fd. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    /* Keep the fd so qemu_ram_free() can munmap+close later. */
    block->fd = fd;
    return area;
}
2831 c902760f Marcelo Tosatti
#endif
2832 c902760f Marcelo Tosatti
2833 d17b5288 Alex Williamson
/* Choose an offset in the ram_addr_t space for a new block of 'size'
   bytes.  Best-fit: for every existing block, measure the gap between
   its end and the next block, and pick the end of the block whose gap
   is the smallest one still large enough. */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *blk, *other;
    ram_addr_t best = 0, best_gap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH(blk, &ram_list.blocks, next) {
        ram_addr_t end = blk->offset + blk->length;
        ram_addr_t gap_end = ULONG_MAX;

        /* Locate the closest block starting at or after 'end'. */
        QLIST_FOREACH(other, &ram_list.blocks, next) {
            if (other->offset >= end) {
                gap_end = MIN(gap_end, other->offset);
            }
        }
        if (gap_end - end >= size && gap_end - end < best_gap) {
            best = end;
            best_gap = gap_end - end;
        }
    }
    return best;
}
2858 04b16653 Alex Williamson
2859 04b16653 Alex Williamson
static ram_addr_t last_ram_offset(void)
2860 04b16653 Alex Williamson
{
2861 d17b5288 Alex Williamson
    RAMBlock *block;
2862 d17b5288 Alex Williamson
    ram_addr_t last = 0;
2863 d17b5288 Alex Williamson
2864 d17b5288 Alex Williamson
    QLIST_FOREACH(block, &ram_list.blocks, next)
2865 d17b5288 Alex Williamson
        last = MAX(last, block->offset + block->length);
2866 d17b5288 Alex Williamson
2867 d17b5288 Alex Williamson
    return last;
2868 d17b5288 Alex Williamson
}
2869 d17b5288 Alex Williamson
2870 84b89d78 Cam Macdonell
/* Register a new RAM block of 'size' bytes and return its ram_addr_t
   offset.  When 'host' is non-NULL the caller supplies the backing
   memory (the block is flagged RAM_PREALLOC_MASK); otherwise the
   backing is allocated here: hugetlbfs via -mem-path, a fixed mmap on
   s390 KVM, the Xen map cache, or plain qemu_vmalloc().  'dev' and
   'name' are combined into the block's unique idstr; registering a
   duplicate idstr aborts. */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Prefix the idstr with the device's bus path, if one exists. */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* The idstr is used by migration to match blocks; it must be unique. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        /* Caller-provided backing: nothing to allocate or free later. */
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* hugetlbfs failed: fall back to an anonymous allocation. */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               an system defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_mapcache_enabled()) {
                /* Xen owns the backing; new_block->host stays unset here. */
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the new block fully dirty. */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2949 e9a1ab19 bellard
2950 6977dfe6 Yoshiaki Tamura
/* Allocate a new RAM block of 'size' bytes with QEMU-managed backing
   memory.  Convenience wrapper around qemu_ram_alloc_from_ptr(). */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
2954 6977dfe6 Yoshiaki Tamura
2955 1f2e98b6 Alex Williamson
/* Drop the RAMBlock bookkeeping for a block registered through
 * qemu_ram_alloc_from_ptr().  The host memory itself was supplied by the
 * caller and is deliberately left untouched. */
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr != block->offset) {
            continue;
        }
        QLIST_REMOVE(block, next);
        qemu_free(block);
        return;
    }
}
2967 1f2e98b6 Alex Williamson
2968 c227f099 Anthony Liguori
/* Free the RAM block registered at ADDR, releasing its host memory in
 * whatever way matches how it was obtained: file-backed mmap, qemu_vmalloc,
 * direct mmap (s390x/KVM) or the Xen map cache.  Silently does nothing if
 * ADDR matches no block. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Host memory was preallocated by the caller; not ours to free. */
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    /* File-backed (e.g. hugetlbfs) mapping. */
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                /* -mem-path is only supported on Linux (non-s390x). */
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_mapcache_enabled()) {
                    qemu_invalidate_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            qemu_free(block);
            return;
        }
    }

}
3005 e9a1ab19 bellard
3006 cd19cfa2 Huang Ying
#ifndef _WIN32
3007 cd19cfa2 Huang Ying
/* Re-establish the host mapping for [addr, addr + length) after the old
 * pages have become unusable (e.g. a hardware memory error): the range is
 * unmapped and replaced with fresh pages mapped at the same virtual
 * address, using the same mapping strategy the block was created with.
 * Fix vs. original: the failure message printed ram_addr_t values with
 * "%lx", which mismatches the argument type when ram_addr_t is 64-bit on a
 * 32-bit host (undefined behavior).  Use the file's established
 * PRIx64/(uint64_t) convention instead. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Caller-owned memory: we must not remap it. */
                ;
            } else {
                /* MAP_FIXED: the replacement must land at the old address. */
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    /* With MAP_FIXED a successful mmap returns vaddr, so
                     * this means the remap failed; treat it as fatal. */
                    fprintf(stderr, "Could not remap addr: %" PRIx64
                            "@%" PRIx64 "\n",
                            (uint64_t)length, (uint64_t)addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
3064 cd19cfa2 Huang Ying
#endif /* !_WIN32 */
3065 cd19cfa2 Huang Ying
3066 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list, presumably so that
               repeated lookups of the same block terminate quickly.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_mapcache_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 */
                if (block->offset == 0) {
                    return qemu_map_cache(addr, 0, 1);
                } else if (block->host == NULL) {
                    /* Map the block lazily on first access. */
                    block->host = xen_map_block(block->offset, block->length);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    /* Address not covered by any registered RAM block: fatal. */
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3104 dc828ca1 pbrook
3105 b2e0a138 Michael S. Tsirkin
/* Return a host pointer to ram allocated with qemu_ram_alloc.
3106 b2e0a138 Michael S. Tsirkin
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3107 b2e0a138 Michael S. Tsirkin
 */
3108 b2e0a138 Michael S. Tsirkin
void *qemu_safe_ram_ptr(ram_addr_t addr)
3109 b2e0a138 Michael S. Tsirkin
{
3110 b2e0a138 Michael S. Tsirkin
    RAMBlock *block;
3111 b2e0a138 Michael S. Tsirkin
3112 b2e0a138 Michael S. Tsirkin
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3113 b2e0a138 Michael S. Tsirkin
        if (addr - block->offset < block->length) {
3114 432d268c Jun Nakajima
            if (xen_mapcache_enabled()) {
3115 432d268c Jun Nakajima
                /* We need to check if the requested address is in the RAM
3116 432d268c Jun Nakajima
                 * because we don't want to map the entire memory in QEMU.
3117 432d268c Jun Nakajima
                 */
3118 432d268c Jun Nakajima
                if (block->offset == 0) {
3119 432d268c Jun Nakajima
                    return qemu_map_cache(addr, 0, 1);
3120 432d268c Jun Nakajima
                } else if (block->host == NULL) {
3121 432d268c Jun Nakajima
                    block->host = xen_map_block(block->offset, block->length);
3122 432d268c Jun Nakajima
                }
3123 432d268c Jun Nakajima
            }
3124 b2e0a138 Michael S. Tsirkin
            return block->host + (addr - block->offset);
3125 b2e0a138 Michael S. Tsirkin
        }
3126 b2e0a138 Michael S. Tsirkin
    }
3127 b2e0a138 Michael S. Tsirkin
3128 b2e0a138 Michael S. Tsirkin
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3129 b2e0a138 Michael S. Tsirkin
    abort();
3130 b2e0a138 Michael S. Tsirkin
3131 b2e0a138 Michael S. Tsirkin
    return NULL;
3132 b2e0a138 Michael S. Tsirkin
}
3133 b2e0a138 Michael S. Tsirkin
3134 050a0ddf Anthony PERARD
/* Release a host pointer obtained from qemu_get_ram_ptr()/
 * qemu_safe_ram_ptr().  Only does real work when the Xen map cache is in
 * use; otherwise it is effectively a no-op (besides tracing). */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);

    if (xen_mapcache_enabled()) {
        RAMBlock *block;

        /* NOTE: block is the loop variable; QLIST_FOREACH leaves it NULL
         * when no entry matched. */
        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr == block->host) {
                break;
            }
        }
        if (block && block->host) {
            /* addr is the start of an xen_map_block() mapping: unmap it
             * and force a remap on the next access. */
            xen_unmap_block(block->host, block->length);
            block->host = NULL;
        } else {
            /* Otherwise it came from qemu_map_cache(); release that ref. */
            qemu_map_cache_unlock(addr);
        }
    }
}
3154 050a0ddf Anthony PERARD
3155 e890261f Marcelo Tosatti
/* Translate a host pointer back into a guest ram address.  On success
 * stores the address in *ram_addr and returns 0; returns -1 when the
 * pointer does not belong to any RAM block (nor, under Xen, to the map
 * cache). */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped, e.g. under Xen
         * where block->host is only filled in lazily. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    if (xen_mapcache_enabled()) {
        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
        return 0;
    }

    return -1;
}
3178 f471a17e Alex Williamson
3179 e890261f Marcelo Tosatti
/* Some of the softmmu routines need to translate from a host pointer
3180 e890261f Marcelo Tosatti
   (typically a TLB entry) back to a ram offset.  */
3181 e890261f Marcelo Tosatti
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3182 e890261f Marcelo Tosatti
{
3183 e890261f Marcelo Tosatti
    ram_addr_t ram_addr;
3184 f471a17e Alex Williamson
3185 e890261f Marcelo Tosatti
    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3186 e890261f Marcelo Tosatti
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
3187 e890261f Marcelo Tosatti
        abort();
3188 e890261f Marcelo Tosatti
    }
3189 e890261f Marcelo Tosatti
    return ram_addr;
3190 5579c7f3 pbrook
}
3191 5579c7f3 pbrook
3192 c227f099 Anthony Liguori
/* Byte read from unassigned physical memory: returns 0; on SPARC and
 * MicroBlaze it additionally raises an unassigned-access fault. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
3202 e18231a3 blueswir1
3203 c227f099 Anthony Liguori
/* 16-bit read from unassigned physical memory: returns 0; on SPARC and
 * MicroBlaze it additionally raises an unassigned-access fault. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
3213 e18231a3 blueswir1
3214 c227f099 Anthony Liguori
/* 32-bit read from unassigned physical memory: returns 0; on SPARC and
 * MicroBlaze it additionally raises an unassigned-access fault. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
3224 33417e70 bellard
3225 c227f099 Anthony Liguori
/* Byte write to unassigned physical memory: the value is discarded; on
 * SPARC and MicroBlaze an unassigned-access fault is raised. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
3234 e18231a3 blueswir1
3235 c227f099 Anthony Liguori
/* 16-bit write to unassigned physical memory: the value is discarded; on
 * SPARC and MicroBlaze an unassigned-access fault is raised. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
3244 e18231a3 blueswir1
3245 c227f099 Anthony Liguori
/* 32-bit write to unassigned physical memory: the value is discarded; on
 * SPARC and MicroBlaze an unassigned-access fault is raised. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
3254 33417e70 bellard
3255 d60efc6b Blue Swirl
/* Read dispatch table for unassigned memory, indexed by access-size shift
 * (0 = byte, 1 = 16-bit, 2 = 32-bit). */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
3260 33417e70 bellard
3261 d60efc6b Blue Swirl
/* Write dispatch table for unassigned memory, indexed by access-size shift
 * (0 = byte, 1 = 16-bit, 2 = 32-bit). */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3266 33417e70 bellard
3267 c227f099 Anthony Liguori
/* Byte write handler for RAM pages holding translated code: invalidates
 * any TBs generated from the page, performs the store, then updates the
 * dirty bitmap so further writes go straight to RAM once the code has been
 * flushed. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Translated code exists for this page; invalidate it and re-read
         * the flags, which the invalidation may have changed. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3286 9fa3e853 bellard
3287 c227f099 Anthony Liguori
/* 16-bit variant of the notdirty write handler: invalidate translated code
 * derived from the page, perform the store, then update the dirty bitmap. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Translated code exists for this page; invalidate it and re-read
         * the flags, which the invalidation may have changed. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3306 9fa3e853 bellard
3307 c227f099 Anthony Liguori
/* 32-bit variant of the notdirty write handler: invalidate translated code
 * derived from the page, perform the store, then update the dirty bitmap. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Translated code exists for this page; invalidate it and re-read
         * the flags, which the invalidation may have changed. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3326 9fa3e853 bellard
3327 d60efc6b Blue Swirl
/* Read table for I/O slots that are never expected to service reads;
 * every entry is deliberately NULL. */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
3332 9fa3e853 bellard
3333 d60efc6b Blue Swirl
/* Write dispatch table for the notdirty (code-page) handlers, indexed by
 * access-size shift (0 = byte, 1 = 16-bit, 2 = 32-bit). */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3338 1ccde1cb bellard
3339 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.
 * OFFSET is the position of the access within the current page,
 * LEN_MASK masks off the low address bits for the access size, and
 * FLAGS selects the access kind (BP_MEM_READ / BP_MEM_WRITE). */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* An access matches if either side, masked to the other's length,
         * coincides, and the access kind is one the watchpoint covers. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Roll the CPU state back to the faulting instruction and
                 * throw away the TB that contained the access. */
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Re-generate a (single-instruction) TB so execution
                     * stops right after the access completes. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3383 0f459d16 pbrook
3384 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
3385 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
3386 6658ffb8 pbrook
   phys routines.  */
3387 c227f099 Anthony Liguori
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3388 6658ffb8 pbrook
{
3389 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3390 6658ffb8 pbrook
    return ldub_phys(addr);
3391 6658ffb8 pbrook
}
3392 6658ffb8 pbrook
3393 c227f099 Anthony Liguori
/* 16-bit watched read: raise any matching watchpoint, then forward. */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    int page_offset = addr & ~TARGET_PAGE_MASK;

    check_watchpoint(page_offset, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3398 6658ffb8 pbrook
3399 c227f099 Anthony Liguori
/* 32-bit watched read: raise any matching watchpoint, then forward. */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    int page_offset = addr & ~TARGET_PAGE_MASK;

    check_watchpoint(page_offset, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3404 6658ffb8 pbrook
3405 c227f099 Anthony Liguori
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3406 6658ffb8 pbrook
                             uint32_t val)
3407 6658ffb8 pbrook
{
3408 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3409 6658ffb8 pbrook
    stb_phys(addr, val);
3410 6658ffb8 pbrook
}
3411 6658ffb8 pbrook
3412 c227f099 Anthony Liguori
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3413 6658ffb8 pbrook
                             uint32_t val)
3414 6658ffb8 pbrook
{
3415 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3416 6658ffb8 pbrook
    stw_phys(addr, val);
3417 6658ffb8 pbrook
}
3418 6658ffb8 pbrook
3419 c227f099 Anthony Liguori
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3420 6658ffb8 pbrook
                             uint32_t val)
3421 6658ffb8 pbrook
{
3422 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3423 6658ffb8 pbrook
    stl_phys(addr, val);
3424 6658ffb8 pbrook
}
3425 6658ffb8 pbrook
3426 d60efc6b Blue Swirl
/* Read dispatch table for watchpoint-trapped pages, indexed by access-size
 * shift (0 = byte, 1 = 16-bit, 2 = 32-bit). */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3431 6658ffb8 pbrook
3432 d60efc6b Blue Swirl
/* Write dispatch table for watchpoint-trapped pages, indexed by access-size
 * shift (0 = byte, 1 = 16-bit, 2 = 32-bit). */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
3437 6658ffb8 pbrook
3438 f6405247 Richard Henderson
/* Read of size index LEN (0 = byte, 1 = 16-bit, 2 = 32-bit) from a page
 * split into subpage regions: look up the io-mem handler registered for
 * this offset and forward the access to it. */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    /* Apply the per-region offset, then dispatch via the io-mem tables. */
    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}
3452 db7b5426 blueswir1
3453 c227f099 Anthony Liguori
/* Write of size index LEN (0 = byte, 1 = 16-bit, 2 = 32-bit) to a page
 * split into subpage regions: look up the io-mem handler registered for
 * this offset and forward the access to it. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    /* Apply the per-region offset, then dispatch via the io-mem tables. */
    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3466 db7b5426 blueswir1
3467 c227f099 Anthony Liguori
/* Byte read via the subpage dispatcher (size index 0). */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    subpage_t *mmio = opaque;

    return subpage_readlen(mmio, addr, 0);
}
3471 db7b5426 blueswir1
3472 c227f099 Anthony Liguori
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3473 db7b5426 blueswir1
                            uint32_t value)
3474 db7b5426 blueswir1
{
3475 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
3476 db7b5426 blueswir1
}
3477 db7b5426 blueswir1
3478 c227f099 Anthony Liguori
/* 16-bit read via the subpage dispatcher (size index 1). */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    subpage_t *mmio = opaque;

    return subpage_readlen(mmio, addr, 1);
}
3482 db7b5426 blueswir1
3483 c227f099 Anthony Liguori
static void subpage_writew (void *opaque, target_phys_addr_t addr,
3484 db7b5426 blueswir1
                            uint32_t value)
3485 db7b5426 blueswir1
{
3486 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
3487 db7b5426 blueswir1
}
3488 db7b5426 blueswir1
3489 c227f099 Anthony Liguori
/* 32-bit read via the subpage dispatcher (size index 2). */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    subpage_t *mmio = opaque;

    return subpage_readlen(mmio, addr, 2);
}
3493 db7b5426 blueswir1
3494 f6405247 Richard Henderson
static void subpage_writel (void *opaque, target_phys_addr_t addr,
3495 f6405247 Richard Henderson
                            uint32_t value)
3496 db7b5426 blueswir1
{
3497 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
3498 db7b5426 blueswir1
}
3499 db7b5426 blueswir1
3500 d60efc6b Blue Swirl
/* Read dispatch table for subpage-split pages, indexed by access-size
 * shift (0 = byte, 1 = 16-bit, 2 = 32-bit). */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3505 db7b5426 blueswir1
3506 d60efc6b Blue Swirl
/* Write dispatch table for subpage-split pages, indexed by access-size
 * shift (0 = byte, 1 = 16-bit, 2 = 32-bit). */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3511 db7b5426 blueswir1
3512 c227f099 Anthony Liguori
/* Register handler MEMORY for the page-relative byte range [START, END]
 * (inclusive) of subpage MMIO; REGION_OFFSET is added to the address
 * before it is forwarded to the handler.  Returns 0 on success, -1 if the
 * range lies outside the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Plain RAM is not dispatched through subpages; treat such requests as
       unassigned memory instead. */
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    /* Reduce the io-mem token to a bare table index. */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3535 db7b5426 blueswir1
3536 f6405247 Richard Henderson
/*
 * Allocate a subpage dispatcher for the page at 'base', register it as
 * an I/O region, store the encoded phys_offset in *phys, and initially
 * map the whole page to 'orig_memory'/'region_offset'.
 */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio = qemu_mallocz(sizeof(*mmio));
    int subpage_memory;

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    /* Whole page starts out backed by the original memory region. */
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3557 db7b5426 blueswir1
3558 88715657 aliguori
static int get_free_io_mem_idx(void)
3559 88715657 aliguori
{
3560 88715657 aliguori
    int i;
3561 88715657 aliguori
3562 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3563 88715657 aliguori
        if (!io_mem_used[i]) {
3564 88715657 aliguori
            io_mem_used[i] = 1;
3565 88715657 aliguori
            return i;
3566 88715657 aliguori
        }
3567 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3568 88715657 aliguori
    return -1;
3569 88715657 aliguori
}
3570 88715657 aliguori
3571 dd310534 Alexander Graf
/*
3572 dd310534 Alexander Graf
 * Usually, devices operate in little endian mode. There are devices out
3573 dd310534 Alexander Graf
 * there that operate in big endian too. Each device gets byte swapped
3574 dd310534 Alexander Graf
 * mmio if plugged onto a CPU that does the other endianness.
3575 dd310534 Alexander Graf
 *
3576 dd310534 Alexander Graf
 * CPU          Device           swap?
3577 dd310534 Alexander Graf
 *
3578 dd310534 Alexander Graf
 * little       little           no
3579 dd310534 Alexander Graf
 * little       big              yes
3580 dd310534 Alexander Graf
 * big          little           yes
3581 dd310534 Alexander Graf
 * big          big              no
3582 dd310534 Alexander Graf
 */
3583 dd310534 Alexander Graf
3584 dd310534 Alexander Graf
/*
 * Wrapper state for endian-swapping I/O.  Holds the wrapped device's
 * original accessors (index 0 = byte, 1 = word, 2 = long) and its
 * opaque pointer so the swapendian_mem_* shims can forward to them.
 */
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;
3589 dd310534 Alexander Graf
3590 dd310534 Alexander Graf
/* Byte reads have no endianness; forward to the wrapped handler as-is. */
static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    SwapEndianContainer *container = opaque;

    return container->read[0](container->opaque, addr);
}
3597 dd310534 Alexander Graf
3598 dd310534 Alexander Graf
/* 16-bit read: byte-swap the value returned by the wrapped handler. */
static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    SwapEndianContainer *container = opaque;

    return bswap16(container->read[1](container->opaque, addr));
}
3605 dd310534 Alexander Graf
3606 dd310534 Alexander Graf
/* 32-bit read: byte-swap the value returned by the wrapped handler. */
static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    SwapEndianContainer *container = opaque;

    return bswap32(container->read[2](container->opaque, addr));
}
3613 dd310534 Alexander Graf
3614 dd310534 Alexander Graf
static CPUReadMemoryFunc * const swapendian_readfn[3]={
3615 dd310534 Alexander Graf
    swapendian_mem_readb,
3616 dd310534 Alexander Graf
    swapendian_mem_readw,
3617 dd310534 Alexander Graf
    swapendian_mem_readl
3618 dd310534 Alexander Graf
};
3619 dd310534 Alexander Graf
3620 dd310534 Alexander Graf
static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3621 dd310534 Alexander Graf
                                  uint32_t val)
3622 dd310534 Alexander Graf
{
3623 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3624 dd310534 Alexander Graf
    c->write[0](c->opaque, addr, val);
3625 dd310534 Alexander Graf
}
3626 dd310534 Alexander Graf
3627 dd310534 Alexander Graf
static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3628 dd310534 Alexander Graf
                                  uint32_t val)
3629 dd310534 Alexander Graf
{
3630 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3631 dd310534 Alexander Graf
    c->write[1](c->opaque, addr, bswap16(val));
3632 dd310534 Alexander Graf
}
3633 dd310534 Alexander Graf
3634 dd310534 Alexander Graf
static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3635 dd310534 Alexander Graf
                                  uint32_t val)
3636 dd310534 Alexander Graf
{
3637 dd310534 Alexander Graf
    SwapEndianContainer *c = opaque;
3638 dd310534 Alexander Graf
    c->write[2](c->opaque, addr, bswap32(val));
3639 dd310534 Alexander Graf
}
3640 dd310534 Alexander Graf
3641 dd310534 Alexander Graf
static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3642 dd310534 Alexander Graf
    swapendian_mem_writeb,
3643 dd310534 Alexander Graf
    swapendian_mem_writew,
3644 dd310534 Alexander Graf
    swapendian_mem_writel
3645 dd310534 Alexander Graf
};
3646 dd310534 Alexander Graf
3647 dd310534 Alexander Graf
static void swapendian_init(int io_index)
3648 dd310534 Alexander Graf
{
3649 dd310534 Alexander Graf
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3650 dd310534 Alexander Graf
    int i;
3651 dd310534 Alexander Graf
3652 dd310534 Alexander Graf
    /* Swap mmio for big endian targets */
3653 dd310534 Alexander Graf
    c->opaque = io_mem_opaque[io_index];
3654 dd310534 Alexander Graf
    for (i = 0; i < 3; i++) {
3655 dd310534 Alexander Graf
        c->read[i] = io_mem_read[io_index][i];
3656 dd310534 Alexander Graf
        c->write[i] = io_mem_write[io_index][i];
3657 dd310534 Alexander Graf
3658 dd310534 Alexander Graf
        io_mem_read[io_index][i] = swapendian_readfn[i];
3659 dd310534 Alexander Graf
        io_mem_write[io_index][i] = swapendian_writefn[i];
3660 dd310534 Alexander Graf
    }
3661 dd310534 Alexander Graf
    io_mem_opaque[io_index] = c;
3662 dd310534 Alexander Graf
}
3663 dd310534 Alexander Graf
3664 dd310534 Alexander Graf
static void swapendian_del(int io_index)
3665 dd310534 Alexander Graf
{
3666 dd310534 Alexander Graf
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3667 dd310534 Alexander Graf
        qemu_free(io_mem_opaque[io_index]);
3668 dd310534 Alexander Graf
    }
3669 dd310534 Alexander Graf
}
3670 dd310534 Alexander Graf
3671 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3672 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3673 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3674 3ee89922 blueswir1
   If io_index is non zero, the corresponding io zone is
3675 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3676 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3677 4254fab8 blueswir1
   returned if error. */
3678 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
3679 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
3680 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
3681 dd310534 Alexander Graf
                                        void *opaque, enum device_endian endian)
3682 33417e70 bellard
{
3683 3cab721d Richard Henderson
    int i;
3684 3cab721d Richard Henderson
3685 33417e70 bellard
    if (io_index <= 0) {
3686 88715657 aliguori
        io_index = get_free_io_mem_idx();
3687 88715657 aliguori
        if (io_index == -1)
3688 88715657 aliguori
            return io_index;
3689 33417e70 bellard
    } else {
3690 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
3691 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3692 33417e70 bellard
            return -1;
3693 33417e70 bellard
    }
3694 b5ff1b31 bellard
3695 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3696 3cab721d Richard Henderson
        io_mem_read[io_index][i]
3697 3cab721d Richard Henderson
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3698 3cab721d Richard Henderson
    }
3699 3cab721d Richard Henderson
    for (i = 0; i < 3; ++i) {
3700 3cab721d Richard Henderson
        io_mem_write[io_index][i]
3701 3cab721d Richard Henderson
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3702 3cab721d Richard Henderson
    }
3703 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3704 f6405247 Richard Henderson
3705 dd310534 Alexander Graf
    switch (endian) {
3706 dd310534 Alexander Graf
    case DEVICE_BIG_ENDIAN:
3707 dd310534 Alexander Graf
#ifndef TARGET_WORDS_BIGENDIAN
3708 dd310534 Alexander Graf
        swapendian_init(io_index);
3709 dd310534 Alexander Graf
#endif
3710 dd310534 Alexander Graf
        break;
3711 dd310534 Alexander Graf
    case DEVICE_LITTLE_ENDIAN:
3712 dd310534 Alexander Graf
#ifdef TARGET_WORDS_BIGENDIAN
3713 dd310534 Alexander Graf
        swapendian_init(io_index);
3714 dd310534 Alexander Graf
#endif
3715 dd310534 Alexander Graf
        break;
3716 dd310534 Alexander Graf
    case DEVICE_NATIVE_ENDIAN:
3717 dd310534 Alexander Graf
    default:
3718 dd310534 Alexander Graf
        break;
3719 dd310534 Alexander Graf
    }
3720 dd310534 Alexander Graf
3721 f6405247 Richard Henderson
    return (io_index << IO_MEM_SHIFT);
3722 33417e70 bellard
}
3723 61382a50 bellard
3724 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3725 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3726 dd310534 Alexander Graf
                           void *opaque, enum device_endian endian)
3727 1eed09cb Avi Kivity
{
3728 2507c12a Alexander Graf
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
3729 1eed09cb Avi Kivity
}
3730 1eed09cb Avi Kivity
3731 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3732 88715657 aliguori
{
3733 88715657 aliguori
    int i;
3734 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3735 88715657 aliguori
3736 dd310534 Alexander Graf
    swapendian_del(io_index);
3737 dd310534 Alexander Graf
3738 88715657 aliguori
    for (i=0;i < 3; i++) {
3739 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3740 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3741 88715657 aliguori
    }
3742 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3743 88715657 aliguori
    io_mem_used[io_index] = 0;
3744 88715657 aliguori
}
3745 88715657 aliguori
3746 e9179ce1 Avi Kivity
static void io_mem_init(void)
3747 e9179ce1 Avi Kivity
{
3748 e9179ce1 Avi Kivity
    int i;
3749 e9179ce1 Avi Kivity
3750 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3751 2507c12a Alexander Graf
                                 unassigned_mem_write, NULL,
3752 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3753 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3754 2507c12a Alexander Graf
                                 unassigned_mem_write, NULL,
3755 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3756 2507c12a Alexander Graf
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3757 2507c12a Alexander Graf
                                 notdirty_mem_write, NULL,
3758 2507c12a Alexander Graf
                                 DEVICE_NATIVE_ENDIAN);
3759 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3760 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3761 e9179ce1 Avi Kivity
3762 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3763 2507c12a Alexander Graf
                                          watch_mem_write, NULL,
3764 2507c12a Alexander Graf
                                          DEVICE_NATIVE_ENDIAN);
3765 e9179ce1 Avi Kivity
}
3766 e9179ce1 Avi Kivity
3767 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3768 e2eef170 pbrook
3769 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3770 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3771 a68fe89c Paul Brook
/*
 * Debugger access to guest memory (user-mode emulation variant).
 * Copies 'len' bytes between host buffer 'buf' and guest virtual address
 * 'addr', page by page, honouring the page protection flags.
 * Returns 0 on success, -1 if any page is invalid or lacks the required
 * read/write permission (a partial transfer may already have happened).
 */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        /* Clamp each chunk so it never crosses a guest page boundary. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            /* Length 0: nothing was written back to guest memory. */
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3809 8df1cd07 bellard
3810 13eb76e0 bellard
#else
3811 c227f099 Anthony Liguori
/*
 * Read from or write to guest physical memory (system emulation).
 * Walks the physical page table and, per page-bounded chunk, either
 * dispatches to the registered I/O handlers (choosing the widest
 * naturally-aligned access: 4, 2 or 1 bytes) or memcpy's to/from host
 * RAM.  RAM writes invalidate any translated code on the page and set
 * the dirty flags.  Accesses to unassigned regions go through the
 * IO_MEM_UNASSIGNED default handlers.
 */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Clamp each chunk so it never crosses a page boundary. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O case: dispatch through the io_mem handler table. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3908 8df1cd07 bellard
3909 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
3910 c227f099 Anthony Liguori
/* used for ROM loading : can write in RAM and ROM */
/*
 * Like the write path of cpu_physical_memory_rw(), but also allows
 * writing into ROM/ROMD pages.  Writes targeting plain MMIO or
 * unassigned regions are silently ignored.  Does NOT update the dirty
 * flags or invalidate translated code for the touched range.
 */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Clamp each chunk so it never crosses a page boundary. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3948 d0ecd2aa bellard
3949 6d16c2f8 aliguori
/*
 * Intermediate buffer used by cpu_physical_memory_map() when the target
 * region is not directly addressable host RAM.  'addr'/'len' record the
 * guest range being staged so it can be flushed on unmap.
 */
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

/* Single static instance: only one bounce mapping can exist at a time. */
static BounceBuffer bounce;
3956 6d16c2f8 aliguori
3957 ba223c29 aliguori
/*
 * A caller waiting to retry cpu_physical_memory_map().  Its callback is
 * invoked from cpu_notify_map_clients() when mapping resources (the
 * bounce buffer) become available again.
 */
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* List of registered map clients, newest first. */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3965 ba223c29 aliguori
3966 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3967 ba223c29 aliguori
{
3968 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3969 ba223c29 aliguori
3970 ba223c29 aliguori
    client->opaque = opaque;
3971 ba223c29 aliguori
    client->callback = callback;
3972 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3973 ba223c29 aliguori
    return client;
3974 ba223c29 aliguori
}
3975 ba223c29 aliguori
3976 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3977 ba223c29 aliguori
{
3978 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3979 ba223c29 aliguori
3980 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3981 34d5e948 Isaku Yamahata
    qemu_free(client);
3982 ba223c29 aliguori
}
3983 ba223c29 aliguori
3984 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3985 ba223c29 aliguori
{
3986 ba223c29 aliguori
    MapClient *client;
3987 ba223c29 aliguori
3988 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3989 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3990 ba223c29 aliguori
        client->callback(client->opaque);
3991 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3992 ba223c29 aliguori
    }
3993 ba223c29 aliguori
}
3994 ba223c29 aliguori
3995 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3996 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3997 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3998 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3999 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
4000 ba223c29 aliguori
 * likely to succeed.
4001 6d16c2f8 aliguori
 */
4002 c227f099 Anthony Liguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;           /* host pointer for the start of the range */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* Clamp each chunk so it never crosses a page boundary. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Non-RAM: stage through the single static bounce buffer.
               Give up if we already mapped something or the bounce
               buffer is in use by another mapping. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            /* For reads, pre-fill the bounce buffer with guest data. */
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host pointers are no longer contiguous: stop and return
               the shorter range mapped so far. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
4056 6d16c2f8 aliguori
4057 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but a write must
           update dirty tracking and invalidate any translated code
           covering the touched pages. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            /* Walk the written range one target page at a time. */
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        /* With the Xen map cache enabled, release each page-sized
           mapping handed out by the map call. */
        if (xen_mapcache_enabled()) {
            uint8_t *buffer1 = buffer;
            uint8_t *end_buffer = buffer + len;

            while (buffer1 < end_buffer) {
                qemu_put_ram_ptr(buffer1);
                buffer1 += TARGET_PAGE_SIZE;
            }
        }
        return;
    }
    /* Bounce-buffer case: flush the staged data back to guest memory on
       write, free the single global bounce buffer, and wake any clients
       waiting for it to become available. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
4101 d0ecd2aa bellard
4102 8df1cd07 bellard
/* warning: addr must be aligned */
4103 c227f099 Anthony Liguori
uint32_t ldl_phys(target_phys_addr_t addr)
4104 8df1cd07 bellard
{
4105 8df1cd07 bellard
    int io_index;
4106 8df1cd07 bellard
    uint8_t *ptr;
4107 8df1cd07 bellard
    uint32_t val;
4108 8df1cd07 bellard
    unsigned long pd;
4109 8df1cd07 bellard
    PhysPageDesc *p;
4110 8df1cd07 bellard
4111 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4112 8df1cd07 bellard
    if (!p) {
4113 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
4114 8df1cd07 bellard
    } else {
4115 8df1cd07 bellard
        pd = p->phys_offset;
4116 8df1cd07 bellard
    }
4117 3b46e624 ths
4118 5fafdf24 ths
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4119 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
4120 8df1cd07 bellard
        /* I/O case */
4121 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4122 8da3ff18 pbrook
        if (p)
4123 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4124 8df1cd07 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4125 8df1cd07 bellard
    } else {
4126 8df1cd07 bellard
        /* RAM case */
4127 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4128 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
4129 8df1cd07 bellard
        val = ldl_p(ptr);
4130 8df1cd07 bellard
    }
4131 8df1cd07 bellard
    return val;
4132 8df1cd07 bellard
}
4133 8df1cd07 bellard
4134 84b7b8e7 bellard
/* Load a 64-bit value from guest physical memory.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: the io_mem tables only provide 32-bit accessors
           (index [2]), so perform two 4-byte reads and combine them in
           guest byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        /* Big-endian guest: first word is the high half. */
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        /* Little-endian guest: first word is the low half. */
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
4171 84b7b8e7 bellard
4172 aab33094 bellard
/* XXX: optimize */
4173 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
4174 aab33094 bellard
{
4175 aab33094 bellard
    uint8_t val;
4176 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
4177 aab33094 bellard
    return val;
4178 aab33094 bellard
}
4179 aab33094 bellard
4180 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
4181 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
4182 aab33094 bellard
{
4183 733f0b02 Michael S. Tsirkin
    int io_index;
4184 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
4185 733f0b02 Michael S. Tsirkin
    uint64_t val;
4186 733f0b02 Michael S. Tsirkin
    unsigned long pd;
4187 733f0b02 Michael S. Tsirkin
    PhysPageDesc *p;
4188 733f0b02 Michael S. Tsirkin
4189 733f0b02 Michael S. Tsirkin
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4190 733f0b02 Michael S. Tsirkin
    if (!p) {
4191 733f0b02 Michael S. Tsirkin
        pd = IO_MEM_UNASSIGNED;
4192 733f0b02 Michael S. Tsirkin
    } else {
4193 733f0b02 Michael S. Tsirkin
        pd = p->phys_offset;
4194 733f0b02 Michael S. Tsirkin
    }
4195 733f0b02 Michael S. Tsirkin
4196 733f0b02 Michael S. Tsirkin
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4197 733f0b02 Michael S. Tsirkin
        !(pd & IO_MEM_ROMD)) {
4198 733f0b02 Michael S. Tsirkin
        /* I/O case */
4199 733f0b02 Michael S. Tsirkin
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4200 733f0b02 Michael S. Tsirkin
        if (p)
4201 733f0b02 Michael S. Tsirkin
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4202 733f0b02 Michael S. Tsirkin
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4203 733f0b02 Michael S. Tsirkin
    } else {
4204 733f0b02 Michael S. Tsirkin
        /* RAM case */
4205 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4206 733f0b02 Michael S. Tsirkin
            (addr & ~TARGET_PAGE_MASK);
4207 733f0b02 Michael S. Tsirkin
        val = lduw_p(ptr);
4208 733f0b02 Michael S. Tsirkin
    }
4209 733f0b02 Michael S. Tsirkin
    return val;
4210 aab33094 bellard
}
4211 aab33094 bellard
4212 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Non-RAM page: dispatch to the 32-bit I/O write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* Normally this helper deliberately skips dirty tracking (see
           comment above), but during migration the dirty log must still
           reflect the write so the page gets re-sent. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4250 8df1cd07 bellard
4251 c227f099 Anthony Liguori
/* 64-bit variant of stl_phys_notdirty: store without updating dirty
   bits or invalidating translated code.  warning: addr must be aligned. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Non-RAM page: only 32-bit I/O accessors exist, so split the
           store into two 4-byte writes in guest byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: direct host store; dirty tracking intentionally
           skipped (this path is for PTE-style updates). */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
4282 bc98a7ef j_mayer
4283 8df1cd07 bellard
/* warning: addr must be aligned */
4284 c227f099 Anthony Liguori
void stl_phys(target_phys_addr_t addr, uint32_t val)
4285 8df1cd07 bellard
{
4286 8df1cd07 bellard
    int io_index;
4287 8df1cd07 bellard
    uint8_t *ptr;
4288 8df1cd07 bellard
    unsigned long pd;
4289 8df1cd07 bellard
    PhysPageDesc *p;
4290 8df1cd07 bellard
4291 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
4292 8df1cd07 bellard
    if (!p) {
4293 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
4294 8df1cd07 bellard
    } else {
4295 8df1cd07 bellard
        pd = p->phys_offset;
4296 8df1cd07 bellard
    }
4297 3b46e624 ths
4298 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4299 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4300 8da3ff18 pbrook
        if (p)
4301 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4302 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4303 8df1cd07 bellard
    } else {
4304 8df1cd07 bellard
        unsigned long addr1;
4305 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4306 8df1cd07 bellard
        /* RAM case */
4307 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
4308 8df1cd07 bellard
        stl_p(ptr, val);
4309 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
4310 3a7d929e bellard
            /* invalidate code */
4311 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4312 3a7d929e bellard
            /* set dirty bit */
4313 f7c11b53 Yoshiaki Tamura
            cpu_physical_memory_set_dirty_flags(addr1,
4314 f7c11b53 Yoshiaki Tamura
                (0xff & ~CODE_DIRTY_FLAG));
4315 3a7d929e bellard
        }
4316 8df1cd07 bellard
    }
4317 8df1cd07 bellard
}
4318 8df1cd07 bellard
4319 aab33094 bellard
/* XXX: optimize */
4320 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
4321 aab33094 bellard
{
4322 aab33094 bellard
    uint8_t v = val;
4323 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
4324 aab33094 bellard
}
4325 aab33094 bellard
4326 733f0b02 Michael S. Tsirkin
/* Store a 16-bit value to guest physical memory.
   warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Non-RAM page: index [1] selects the 16-bit I/O write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        /* Keep dirty tracking and the translated-code cache coherent. */
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4361 aab33094 bellard
4362 aab33094 bellard
/* XXX: optimize */
4363 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
4364 aab33094 bellard
{
4365 aab33094 bellard
    val = tswap64(val);
4366 71d2b725 Stefan Weil
    cpu_physical_memory_write(addr, &val, 8);
4367 aab33094 bellard
}
4368 aab33094 bellard
4369 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
4370 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4371 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
4372 13eb76e0 bellard
{
4373 13eb76e0 bellard
    int l;
4374 c227f099 Anthony Liguori
    target_phys_addr_t phys_addr;
4375 9b3c35e0 j_mayer
    target_ulong page;
4376 13eb76e0 bellard
4377 13eb76e0 bellard
    while (len > 0) {
4378 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
4379 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
4380 13eb76e0 bellard
        /* if no physical page mapped, return an error */
4381 13eb76e0 bellard
        if (phys_addr == -1)
4382 13eb76e0 bellard
            return -1;
4383 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
4384 13eb76e0 bellard
        if (l > len)
4385 13eb76e0 bellard
            l = len;
4386 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
4387 5e2972fd aliguori
        if (is_write)
4388 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
4389 5e2972fd aliguori
        else
4390 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4391 13eb76e0 bellard
        len -= l;
4392 13eb76e0 bellard
        buf += l;
4393 13eb76e0 bellard
        addr += l;
4394 13eb76e0 bellard
    }
4395 13eb76e0 bellard
    return 0;
4396 13eb76e0 bellard
}
4397 a68fe89c Paul Brook
#endif
4398 13eb76e0 bellard
4399 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Locate the TB containing the host return address of the I/O op. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* Rewind CPU state to the faulting guest instruction. */
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Back up to the branch and credit one instruction back to the
           icount budget. */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Retranslate the block limited to n instructions, forcing the last
       one (the I/O access) to terminate the TB. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
4457 2e70f6ef pbrook
4458 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
4459 b3755a91 Paul Brook
4460 055403b2 Stefan Weil
/* Print translation-buffer statistics (TB sizes, jump chaining, flush
   counts) to f via cpu_fprintf; used by the "info jit" monitor command. */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* Scan every live TB and accumulate the statistics. */
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* A second page address means the TB spans a page boundary. */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks an unpatched jump slot; anything else means the
           TB was direct-chained to a successor. */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n", 
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    /* Append the TCG backend's own counters. */
    tcg_dump_info(f, cpu_fprintf);
}
4511 e3db7226 bellard
4512 61382a50 bellard
#define MMUSUFFIX _cmmu
4513 61382a50 bellard
#define GETPC() NULL
4514 61382a50 bellard
#define env cpu_single_env
4515 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
4516 61382a50 bellard
4517 61382a50 bellard
#define SHIFT 0
4518 61382a50 bellard
#include "softmmu_template.h"
4519 61382a50 bellard
4520 61382a50 bellard
#define SHIFT 1
4521 61382a50 bellard
#include "softmmu_template.h"
4522 61382a50 bellard
4523 61382a50 bellard
#define SHIFT 2
4524 61382a50 bellard
#include "softmmu_template.h"
4525 61382a50 bellard
4526 61382a50 bellard
#define SHIFT 3
4527 61382a50 bellard
#include "softmmu_template.h"
4528 61382a50 bellard
4529 61382a50 bellard
#undef env
4530 61382a50 bellard
4531 61382a50 bellard
#endif