Statistics
| Branch: | Revision:

root / exec.c @ fd436907

History | View | Annotate | Download (119.8 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
#include <stdlib.h>
27 54936004 bellard
#include <stdio.h>
28 54936004 bellard
#include <stdarg.h>
29 54936004 bellard
#include <string.h>
30 54936004 bellard
#include <errno.h>
31 54936004 bellard
#include <unistd.h>
32 54936004 bellard
#include <inttypes.h>
33 54936004 bellard
34 6180a181 bellard
#include "cpu.h"
35 6180a181 bellard
#include "exec-all.h"
36 ca10f867 aurel32
#include "qemu-common.h"
37 b67d9a52 bellard
#include "tcg.h"
38 b3c7724c pbrook
#include "hw/hw.h"
39 74576198 aliguori
#include "osdep.h"
40 7ba1e619 aliguori
#include "kvm.h"
41 29e922b6 Blue Swirl
#include "qemu-timer.h"
42 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
43 53a5960a pbrook
#include <qemu.h>
44 fd052bf6 Riku Voipio
#include <signal.h>
45 f01576f1 Juergen Lock
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
46 f01576f1 Juergen Lock
#include <sys/param.h>
47 f01576f1 Juergen Lock
#if __FreeBSD_version >= 700104
48 f01576f1 Juergen Lock
#define HAVE_KINFO_GETVMMAP
49 f01576f1 Juergen Lock
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
50 f01576f1 Juergen Lock
#include <sys/time.h>
51 f01576f1 Juergen Lock
#include <sys/proc.h>
52 f01576f1 Juergen Lock
#include <machine/profile.h>
53 f01576f1 Juergen Lock
#define _KERNEL
54 f01576f1 Juergen Lock
#include <sys/user.h>
55 f01576f1 Juergen Lock
#undef _KERNEL
56 f01576f1 Juergen Lock
#undef sigqueue
57 f01576f1 Juergen Lock
#include <libutil.h>
58 f01576f1 Juergen Lock
#endif
59 f01576f1 Juergen Lock
#endif
60 53a5960a pbrook
#endif
61 54936004 bellard
62 fd6ce8f6 bellard
//#define DEBUG_TB_INVALIDATE
63 66e85a21 bellard
//#define DEBUG_FLUSH
64 9fa3e853 bellard
//#define DEBUG_TLB
65 67d3b957 pbrook
//#define DEBUG_UNASSIGNED
66 fd6ce8f6 bellard
67 fd6ce8f6 bellard
/* make various TB consistency checks */
68 5fafdf24 ths
//#define DEBUG_TB_CHECK
69 5fafdf24 ths
//#define DEBUG_TLB_CHECK
70 fd6ce8f6 bellard
71 1196be37 ths
//#define DEBUG_IOPORT
72 db7b5426 blueswir1
//#define DEBUG_SUBPAGE
73 1196be37 ths
74 99773bd4 pbrook
#if !defined(CONFIG_USER_ONLY)
75 99773bd4 pbrook
/* TB consistency checks only implemented for usermode emulation.  */
76 99773bd4 pbrook
#undef DEBUG_TB_CHECK
77 99773bd4 pbrook
#endif
78 99773bd4 pbrook
79 9fa3e853 bellard
#define SMC_BITMAP_USE_THRESHOLD 10
80 9fa3e853 bellard
81 bdaf78e0 blueswir1
static TranslationBlock *tbs;
82 26a5f13b bellard
int code_gen_max_blocks;
83 9fa3e853 bellard
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84 bdaf78e0 blueswir1
static int nb_tbs;
85 eb51d102 bellard
/* any access to the tbs or the page table must use this lock */
86 c227f099 Anthony Liguori
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87 fd6ce8f6 bellard
88 141ac468 blueswir1
#if defined(__arm__) || defined(__sparc_v9__)
89 141ac468 blueswir1
/* The prologue must be reachable with a direct jump. ARM and Sparc64
90 141ac468 blueswir1
 have limited branch ranges (possibly also PPC) so place it in a
91 d03d860b blueswir1
 section close to code segment. */
92 d03d860b blueswir1
#define code_gen_section                                \
93 d03d860b blueswir1
    __attribute__((__section__(".gen_code")))           \
94 d03d860b blueswir1
    __attribute__((aligned (32)))
95 f8e2af11 Stefan Weil
#elif defined(_WIN32)
96 f8e2af11 Stefan Weil
/* Maximum alignment for Win32 is 16. */
97 f8e2af11 Stefan Weil
#define code_gen_section                                \
98 f8e2af11 Stefan Weil
    __attribute__((aligned (16)))
99 d03d860b blueswir1
#else
100 d03d860b blueswir1
#define code_gen_section                                \
101 d03d860b blueswir1
    __attribute__((aligned (32)))
102 d03d860b blueswir1
#endif
103 d03d860b blueswir1
104 d03d860b blueswir1
uint8_t code_gen_prologue[1024] code_gen_section;
105 bdaf78e0 blueswir1
static uint8_t *code_gen_buffer;
106 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_size;
107 26a5f13b bellard
/* threshold to flush the translated code buffer */
108 bdaf78e0 blueswir1
static unsigned long code_gen_buffer_max_size;
109 fd6ce8f6 bellard
uint8_t *code_gen_ptr;
110 fd6ce8f6 bellard
111 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
112 9fa3e853 bellard
int phys_ram_fd;
113 1ccde1cb bellard
uint8_t *phys_ram_dirty;
114 74576198 aliguori
static int in_migration;
115 94a6b54f pbrook
116 94a6b54f pbrook
typedef struct RAMBlock {
117 94a6b54f pbrook
    uint8_t *host;
118 c227f099 Anthony Liguori
    ram_addr_t offset;
119 c227f099 Anthony Liguori
    ram_addr_t length;
120 94a6b54f pbrook
    struct RAMBlock *next;
121 94a6b54f pbrook
} RAMBlock;
122 94a6b54f pbrook
123 94a6b54f pbrook
static RAMBlock *ram_blocks;
124 94a6b54f pbrook
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
125 ccbb4d44 Stuart Brady
   then we can no longer assume contiguous ram offsets, and external uses
126 94a6b54f pbrook
   of this variable will break.  */
127 c227f099 Anthony Liguori
ram_addr_t last_ram_offset;
128 e2eef170 pbrook
#endif
129 9fa3e853 bellard
130 6a00d601 bellard
CPUState *first_cpu;
131 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
132 6a00d601 bellard
   cpu_exec() */
133 5fafdf24 ths
CPUState *cpu_single_env;
134 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
135 bf20dc07 ths
   1 = Precise instruction counting.
136 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
137 2e70f6ef pbrook
int use_icount = 0;
138 2e70f6ef pbrook
/* Current instruction counter.  While executing translated code this may
139 2e70f6ef pbrook
   include some instructions that have not yet been executed.  */
140 2e70f6ef pbrook
int64_t qemu_icount;
141 6a00d601 bellard
142 54936004 bellard
typedef struct PageDesc {
143 92e873b9 bellard
    /* list of TBs intersecting this ram page */
144 fd6ce8f6 bellard
    TranslationBlock *first_tb;
145 9fa3e853 bellard
    /* in order to optimize self modifying code, we count the number
146 9fa3e853 bellard
       of lookups we do to a given page to use a bitmap */
147 9fa3e853 bellard
    unsigned int code_write_count;
148 9fa3e853 bellard
    uint8_t *code_bitmap;
149 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
150 9fa3e853 bellard
    unsigned long flags;
151 9fa3e853 bellard
#endif
152 54936004 bellard
} PageDesc;
153 54936004 bellard
154 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
155 5cd2c5b6 Richard Henderson
   while in user mode we want it to be based on virtual addresses.  */
156 5cd2c5b6 Richard Henderson
#if !defined(CONFIG_USER_ONLY)
157 41c1b1c9 Paul Brook
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
158 41c1b1c9 Paul Brook
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
159 41c1b1c9 Paul Brook
#else
160 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
161 41c1b1c9 Paul Brook
#endif
162 bedb69ea j_mayer
#else
163 5cd2c5b6 Richard Henderson
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
164 bedb69ea j_mayer
#endif
165 54936004 bellard
166 5cd2c5b6 Richard Henderson
/* Size of the L2 (and L3, etc) page tables.  */
167 5cd2c5b6 Richard Henderson
#define L2_BITS 10
168 54936004 bellard
#define L2_SIZE (1 << L2_BITS)
169 54936004 bellard
170 5cd2c5b6 Richard Henderson
/* The bits remaining after N lower levels of page tables.  */
171 5cd2c5b6 Richard Henderson
#define P_L1_BITS_REM \
172 5cd2c5b6 Richard Henderson
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
173 5cd2c5b6 Richard Henderson
#define V_L1_BITS_REM \
174 5cd2c5b6 Richard Henderson
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
175 5cd2c5b6 Richard Henderson
176 5cd2c5b6 Richard Henderson
/* Size of the L1 page table.  Avoid silly small sizes.  */
177 5cd2c5b6 Richard Henderson
#if P_L1_BITS_REM < 4
178 5cd2c5b6 Richard Henderson
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
179 5cd2c5b6 Richard Henderson
#else
180 5cd2c5b6 Richard Henderson
#define P_L1_BITS  P_L1_BITS_REM
181 5cd2c5b6 Richard Henderson
#endif
182 5cd2c5b6 Richard Henderson
183 5cd2c5b6 Richard Henderson
#if V_L1_BITS_REM < 4
184 5cd2c5b6 Richard Henderson
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
185 5cd2c5b6 Richard Henderson
#else
186 5cd2c5b6 Richard Henderson
#define V_L1_BITS  V_L1_BITS_REM
187 5cd2c5b6 Richard Henderson
#endif
188 5cd2c5b6 Richard Henderson
189 5cd2c5b6 Richard Henderson
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
190 5cd2c5b6 Richard Henderson
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
191 5cd2c5b6 Richard Henderson
192 5cd2c5b6 Richard Henderson
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
193 5cd2c5b6 Richard Henderson
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
194 5cd2c5b6 Richard Henderson
195 83fb7adf bellard
unsigned long qemu_real_host_page_size;
196 83fb7adf bellard
unsigned long qemu_host_page_bits;
197 83fb7adf bellard
unsigned long qemu_host_page_size;
198 83fb7adf bellard
unsigned long qemu_host_page_mask;
199 54936004 bellard
200 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the virtual address space.
201 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PageDesc.  */
202 5cd2c5b6 Richard Henderson
static void *l1_map[V_L1_SIZE];
203 54936004 bellard
204 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
205 41c1b1c9 Paul Brook
typedef struct PhysPageDesc {
206 41c1b1c9 Paul Brook
    /* offset in host memory of the page + io_index in the low bits */
207 41c1b1c9 Paul Brook
    ram_addr_t phys_offset;
208 41c1b1c9 Paul Brook
    ram_addr_t region_offset;
209 41c1b1c9 Paul Brook
} PhysPageDesc;
210 41c1b1c9 Paul Brook
211 5cd2c5b6 Richard Henderson
/* This is a multi-level map on the physical address space.
212 5cd2c5b6 Richard Henderson
   The bottom level has pointers to PhysPageDesc.  */
213 5cd2c5b6 Richard Henderson
static void *l1_phys_map[P_L1_SIZE];
214 6d9a1304 Paul Brook
215 e2eef170 pbrook
static void io_mem_init(void);
216 e2eef170 pbrook
217 33417e70 bellard
/* io memory support */
218 33417e70 bellard
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
219 33417e70 bellard
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
220 a4193c8a bellard
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
221 511d2b14 blueswir1
static char io_mem_used[IO_MEM_NB_ENTRIES];
222 6658ffb8 pbrook
static int io_mem_watch;
223 6658ffb8 pbrook
#endif
224 33417e70 bellard
225 34865134 bellard
/* log support */
226 1e8b27ca Juha Riihimäki
#ifdef WIN32
227 1e8b27ca Juha Riihimäki
static const char *logfilename = "qemu.log";
228 1e8b27ca Juha Riihimäki
#else
229 d9b630fd blueswir1
static const char *logfilename = "/tmp/qemu.log";
230 1e8b27ca Juha Riihimäki
#endif
231 34865134 bellard
FILE *logfile;
232 34865134 bellard
int loglevel;
233 e735b91c pbrook
static int log_append = 0;
234 34865134 bellard
235 e3db7226 bellard
/* statistics */
236 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
237 e3db7226 bellard
static int tlb_flush_count;
238 b3755a91 Paul Brook
#endif
239 e3db7226 bellard
static int tb_flush_count;
240 e3db7226 bellard
static int tb_phys_invalidate_count;
241 e3db7226 bellard
242 7cb69cae bellard
#ifdef _WIN32
/* Make the host memory range [addr, addr + size) executable (RWX).
   VirtualProtect operates on whole pages internally, so no manual
   alignment is needed on this path.
   NOTE(review): the VirtualProtect return value is ignored; a failure
   would silently leave the buffer non-executable. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
/* Make the host memory range [addr, addr + size) executable (RWX).
   mprotect() requires page-aligned arguments, so the range is first
   widened to full host-page boundaries.
   NOTE(review): the mprotect return value is ignored; a failure would
   silently leave the buffer non-executable. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    /* Round the start down to a page boundary... */
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    /* ...and the end up, so the whole requested range is covered. */
    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
267 7cb69cae bellard
268 b346ff46 bellard
/* One-time page-size setup: determine the real host page size, derive the
   effective qemu_host_page_size/bits/mask from it, and (user-mode only)
   walk the host's existing memory mappings so their guest-visible pages
   can be flagged PAGE_RESERVED — presumably so later guest mappings avoid
   them; confirm against page_set_flags semantics. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* Only default qemu_host_page_size when it is still unset, and never
       let it be smaller than the target page size. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* Derive log2 and alignment mask from the (power-of-two) page size. */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 7.0: enumerate our own mappings via kinfo_getvmmap(). */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        /* Mapping extends past the guest address space:
                           reserve from startaddr all the way up. */
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Generic path: parse /proc/self/maps (or the Linux-compat view of
           it on the BSDs). */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
        f = fopen("/compat/linux/proc/self/maps", "r");
#else
        f = fopen("/proc/self/maps", "r");
#endif
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                /* Each maps line starts with "start-end ..."; anything a
                   line fails to match is skipped by the %*[^\n] eater. */
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        /* Mapping extends past the guest address space:
                           reserve up to the top. */
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
360 54936004 bellard
361 41c1b1c9 Paul Brook
/* Find the PageDesc for (target) page number 'index' in the multi-level
   l1_map.  When 'alloc' is non-zero, missing intermediate tables and the
   final PageDesc array are created on demand; otherwise NULL is returned
   as soon as any level is absent. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex.
       Neither can we record the new pages we reserve while allocating a
       given page because that may recurse into an unallocated page table
       entry.  Stuff the allocations we do make into a queue and process
       them after having completed one entire page table allocation.  */

    /* Two slots (guest address, length) per possible allocation: one per
       intermediate level plus the final PageDesc array. */
    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
    int reserve_idx = 0;

    /* NOTE(review): the mmap() result is used without a MAP_FAILED check;
       h2g_valid(MAP_FAILED) merely decides whether it is queued — confirm
       whether allocation failure is handled elsewhere. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
        if (h2g_valid(P)) {                             \
            reserve[reserve_idx] = h2g(P);              \
            reserve[reserve_idx + 1] = SIZE;            \
            reserve_idx += 2;                           \
        }                                               \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1: walk (and optionally create) intermediate tables. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of PageDesc. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC
#if defined(CONFIG_USER_ONLY)
    /* The page-table walk is complete, so it is now safe to record the
       pages we allocated above as reserved. */
    for (i = 0; i < reserve_idx; i += 2) {
        unsigned long addr = reserve[i];
        unsigned long len = reserve[i + 1];

        page_set_flags(addr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(addr + len),
                       PAGE_RESERVED);
    }
#endif

    return pd + (index & (L2_SIZE - 1));
}
433 54936004 bellard
434 41c1b1c9 Paul Brook
/* Non-allocating lookup: return the PageDesc covering page 'index', or
   NULL when no descriptor has been created for it yet. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
438 fd6ce8f6 bellard
439 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
440 c227f099 Anthony Liguori
/* Find the PhysPageDesc for physical page number 'index' in the
   multi-level l1_phys_map.  When 'alloc' is non-zero, missing
   intermediate tables and the final descriptor array are created on
   demand; otherwise NULL is returned as soon as any level is absent.
   Newly created descriptors start out IO_MEM_UNASSIGNED with
   region_offset defaulting to the page's own physical address. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1: walk (and optionally create) intermediate tables.
       These hold only pointers, so zero-fill makes absent entries NULL. */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of PhysPageDesc. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        /* Initialize every descriptor in the new leaf.  (Reuses the outer
           loop index rather than shadowing it with an inner declaration,
           which -Wshadow would flag.) */
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
479 92e873b9 bellard
480 c227f099 Anthony Liguori
/* Non-allocating lookup: return the PhysPageDesc for physical page
   'index', or NULL when no descriptor has been created for it yet. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
484 92e873b9 bellard
485 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr);
486 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
487 3a7d929e bellard
                                    target_ulong vaddr);
488 c8a706fe pbrook
#define mmap_lock() do { } while(0)
489 c8a706fe pbrook
#define mmap_unlock() do { } while(0)
490 9fa3e853 bellard
#endif
491 fd6ce8f6 bellard
492 4369415f bellard
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
493 4369415f bellard
494 4369415f bellard
#if defined(CONFIG_USER_ONLY)
495 ccbb4d44 Stuart Brady
/* Currently it is not recommended to allocate big chunks of data in
496 4369415f bellard
   user mode. It will change when a dedicated libc will be used */
497 4369415f bellard
#define USE_STATIC_CODE_GEN_BUFFER
498 4369415f bellard
#endif
499 4369415f bellard
500 4369415f bellard
#ifdef USE_STATIC_CODE_GEN_BUFFER
501 ebf50fb3 Aurelien Jarno
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
502 ebf50fb3 Aurelien Jarno
               __attribute__((aligned (CODE_GEN_ALIGN)));
503 4369415f bellard
#endif
504 4369415f bellard
505 8fcd3692 blueswir1
/* Allocate the buffer that generated host code is emitted into, make it
   executable, and size the TranslationBlock bookkeeping arrays from it.
   'tb_size' is the requested buffer size in bytes; 0 selects a default. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* Static-buffer build: tb_size is ignored and the BSS array is used. */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* MAP_32BIT keeps the buffer in the low 2GB of the address space
           (see mmap(2)), which allows rel32 calls/branches. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Fallback for other hosts: heap allocation plus an explicit
       permission change via map_exec(). */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Stop generating before the end so one maximally-sized TB started
       near the threshold cannot overflow the buffer. */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
591 26a5f13b bellard
592 26a5f13b bellard
/* Must be called once, before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer; zero means the default
   size (see code_gen_alloc). */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    /* Allocate the code generation buffer before anything can translate,
       and start emitting at its base. */
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    /* System emulation only: set up the I/O memory handler tables. */
    io_mem_init();
#endif
}
605 26a5f13b bellard
606 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
607 9656f324 pbrook
608 e59fb374 Juan Quintela
/* VMState post_load hook: fix up the common CPU fields after an
   incoming snapshot/migration.  Always returns 0 (success). */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    /* TLB contents are not migrated; invalidate everything */
    tlb_flush(env, 1);

    return 0;
}
619 e7f4eff7 Juan Quintela
620 e7f4eff7 Juan Quintela
/* Migration description for the target-independent part of CPUState:
   only 'halted' and 'interrupt_request' are transferred; the rest is
   reconstructed (see cpu_common_post_load). */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
632 9656f324 pbrook
#endif
633 9656f324 pbrook
634 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
635 950f1472 Glauber Costa
{
636 950f1472 Glauber Costa
    CPUState *env = first_cpu;
637 950f1472 Glauber Costa
638 950f1472 Glauber Costa
    while (env) {
639 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
640 950f1472 Glauber Costa
            break;
641 950f1472 Glauber Costa
        env = env->next_cpu;
642 950f1472 Glauber Costa
    }
643 950f1472 Glauber Costa
644 950f1472 Glauber Costa
    return env;
645 950f1472 Glauber Costa
}
646 950f1472 Glauber Costa
647 6a00d601 bellard
/* Register a new CPU: append it to the global list, assign it the
   next free cpu_index, and hook it up to save/restore.  In user mode
   the list is guarded by the cpu list lock. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    /* walk to the list tail, counting existing CPUs to pick the index */
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    /* link the new CPU at the tail */
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* common fields via VMState, target-specific state via cpu_save/load */
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
676 fd6ce8f6 bellard
677 9fa3e853 bellard
/* Drop the SMC code bitmap of a page (if one was built) and reset the
   write counter so it can be rebuilt on demand. */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    p->code_write_count = 0;
    if (p->code_bitmap == NULL) {
        return;
    }
    qemu_free(p->code_bitmap);
    p->code_bitmap = NULL;
}
685 9fa3e853 bellard
686 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

/* Recursive helper: walk one node of the multi-level page table.
   'level' 0 means *lp is an array of PageDescs; otherwise it is an
   array of child pointers to recurse into.  NULL subtrees are empty. */
static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}
708 5cd2c5b6 Richard Henderson
709 5cd2c5b6 Richard Henderson
/* Clear the TB lists (and code bitmaps) of every page by walking the
   whole l1_map radix tree. */
static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
716 fd6ce8f6 bellard
717 fd6ce8f6 bellard
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* sanity check: the generation pointer must still be inside the buffer */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* every CPU's fast TB lookup cache now holds stale pointers */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* reuse the buffer from the start */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
745 fd6ce8f6 bellard
746 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
747 fd6ce8f6 bellard
748 bc98a7ef j_mayer
/* Debug-only consistency check: report any TB still registered in the
   physical hash table whose PC range overlaps the (page-aligned)
   'address' — such a TB should have been invalidated. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* overlap test: NOT (tb entirely before or after the page) */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
764 fd6ce8f6 bellard
765 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* check both pages a TB may span; neither may be writable
               while translated code for it exists */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
782 fd6ce8f6 bellard
783 fd6ce8f6 bellard
#endif
784 fd6ce8f6 bellard
785 fd6ce8f6 bellard
/* invalidate one TB */
786 fd6ce8f6 bellard
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
787 fd6ce8f6 bellard
                             int next_offset)
788 fd6ce8f6 bellard
{
789 fd6ce8f6 bellard
    TranslationBlock *tb1;
790 fd6ce8f6 bellard
    for(;;) {
791 fd6ce8f6 bellard
        tb1 = *ptb;
792 fd6ce8f6 bellard
        if (tb1 == tb) {
793 fd6ce8f6 bellard
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
794 fd6ce8f6 bellard
            break;
795 fd6ce8f6 bellard
        }
796 fd6ce8f6 bellard
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
797 fd6ce8f6 bellard
    }
798 fd6ce8f6 bellard
}
799 fd6ce8f6 bellard
800 9fa3e853 bellard
/* Unlink 'tb' from a per-page TB list.  List pointers carry the page
   index (0 or 1) of the *next* TB in their low 2 bits, so each link
   must be untagged before following and the matching page_next slot
   used to continue the walk. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        /* strip the tag bits to get the real pointer */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
816 9fa3e853 bellard
817 d4e8164f bellard
/* Remove this TB from the circular list of TBs jumping to the same
   target through jump slot 'n'.  Entries are tagged pointers: low
   bits 0/1 select a jmp_next slot, value 2 marks the list head
   (jmp_first of the target TB). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: we are at the head entry of the circle */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
844 d4e8164f bellard
845 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* re-aim the patched jump at the TB's own epilogue stub */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
851 d4e8164f bellard
852 41c1b1c9 Paul Brook
/* Fully invalidate one TB: unlink it from the physical hash table,
   from the per-page lists (except the page 'page_addr', whose list the
   caller is iterating — pass -1 to unlink from both), from every CPU's
   jmp cache, and from all jump chains in both directions. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's fast lookup (jmp) cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        /* tagged-pointer walk: tag 2 marks the end of the circle */
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* unpatch the incoming jump and drop it from the chain */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
907 9fa3e853 bellard
908 9fa3e853 bellard
/* Set 'len' consecutive bits in bitmap 'tab', starting at bit index
   'start' (len must be >= 0).  Bit i lives in tab[i >> 3], bit (i & 7). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;

    for (bit = start; bit < start + len; bit++) {
        tab[bit >> 3] |= 1 << (bit & 7);
    }
}
934 9fa3e853 bellard
935 9fa3e853 bellard
/* Build the SMC code bitmap of a page: one bit per byte, set for every
   byte covered by a TB on this page.  Walks the page's tagged-pointer
   TB list; the tag (0/1) says whether this page is the TB's first or
   second page, which determines which part of the TB falls here. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page: the TB occupies [0, end-of-TB) here */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
962 9fa3e853 bellard
963 2e70f6ef pbrook
/* Translate one block starting at guest (pc, cs_base, flags) and
   register it.  'cflags' are the compile flags (e.g. icount mode).
   Returns the new TB; flushes the whole TB cache first if allocation
   fails, so it cannot fail. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the generation pointer, keeping CODE_GEN_ALIGN alignment */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills onto a second guest page; record it too */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
1000 3b46e624 ths
1001 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* state for handling a write issued by the currently-executing TB
       (self-modifying code) */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on this page, build the code bitmap so
       later small writes can be filtered in the fast path */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                /* lazily resolve which TB issued the write, once */
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1111 fd6ce8f6 bellard
1112 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */
/* Fast-path invalidation for a small aligned write: if the page's code
   bitmap proves no translated code is hit, do nothing; otherwise fall
   back to the full range invalidation. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* extract the 'len' bits covering the written bytes */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
        /* no bitmap yet: must assume code may be hit */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1138 9fa3e853 bellard
1139 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1140 41c1b1c9 Paul Brook
/* Invalidate every TB on the page containing 'addr' (user-mode
   mprotect-fault path).  'pc'/'puc' identify the faulting host context
   so precise SMC handling can restart the modifying instruction. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* resolve which TB contains the faulting host pc */
        current_tb = tb_find_pc(pc);
    }
#endif
    /* walk the page's tagged-pointer TB list, invalidating each TB */
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
1198 9fa3e853 bellard
#endif
1199 fd6ce8f6 bellard
1200 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */
/* 'n' (0 or 1) says whether 'page_addr' is the TB's first or second
   page; it is stored as a tag in the low bits of the list pointer. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push onto the page's list, tagging the pointer with 'n' */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages; collect the union
           of their flags and clear PAGE_WRITE on all of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1253 fd6ce8f6 bellard
1254 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1255 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1256 c27004ec bellard
/* Allocate a new translation block for guest pc.  Returns NULL when
   either the TB table or the code generation buffer is exhausted, in
   which case the caller is expected to flush. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *new_tb;

    if (nb_tbs >= code_gen_max_blocks) {
        return NULL;
    }
    if ((code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) {
        return NULL;
    }
    new_tb = &tbs[nb_tbs++];
    new_tb->pc = pc;
    new_tb->cflags = 0;
    return new_tb;
}
1268 d4e8164f bellard
1269 2e70f6ef pbrook
/* Release a translation block.  Only the most recently allocated TB
   can actually be reclaimed; freeing any other TB is a no-op.  In
   practice this is mostly used for single-use temporary TBs. */
void tb_free(TranslationBlock *tb)
{
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        /* roll back both the TB count and the code buffer pointer */
        nb_tbs--;
        code_gen_ptr = tb->tc_ptr;
    }
}
1279 2e70f6ef pbrook
1280 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1281 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1282 41c1b1c9 Paul Brook
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* the TB's circular jump list initially points at itself; the low
       bits of jmp_first tag the entry (2 marks the list head) */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1319 fd6ce8f6 bellard
1320 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1321 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1322 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1323 fd6ce8f6 bellard
{
1324 9fa3e853 bellard
    int m_min, m_max, m;
1325 9fa3e853 bellard
    unsigned long v;
1326 9fa3e853 bellard
    TranslationBlock *tb;
1327 a513fe19 bellard
1328 a513fe19 bellard
    if (nb_tbs <= 0)
1329 a513fe19 bellard
        return NULL;
1330 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1331 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1332 a513fe19 bellard
        return NULL;
1333 a513fe19 bellard
    /* binary search (cf Knuth) */
1334 a513fe19 bellard
    m_min = 0;
1335 a513fe19 bellard
    m_max = nb_tbs - 1;
1336 a513fe19 bellard
    while (m_min <= m_max) {
1337 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1338 a513fe19 bellard
        tb = &tbs[m];
1339 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1340 a513fe19 bellard
        if (v == tc_ptr)
1341 a513fe19 bellard
            return tb;
1342 a513fe19 bellard
        else if (tc_ptr < v) {
1343 a513fe19 bellard
            m_max = m - 1;
1344 a513fe19 bellard
        } else {
1345 a513fe19 bellard
            m_min = m + 1;
1346 a513fe19 bellard
        }
1347 5fafdf24 ths
    }
1348 a513fe19 bellard
    return &tbs[m_max];
1349 a513fe19 bellard
}
1350 7501267e bellard
1351 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1352 ea041c0e bellard
1353 ea041c0e bellard
/* Detach jump slot 'n' of 'tb' from the TB it currently chains to and
   reset the generated jump, then recurse into the target TB.  Pointers
   in the jump lists carry a 2-bit tag in their low bits: 0/1 name the
   jump slot, 2 marks the list head. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        /* unlink our entry and clear the slot */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1391 ea041c0e bellard
1392 ea041c0e bellard
/* Reset both outgoing jump slots of 'tb' (and, recursively, of every
   TB reachable through them). */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    int slot;

    for (slot = 0; slot < 2; slot++) {
        tb_reset_jump_recursive2(tb, slot);
    }
}
1397 ea041c0e bellard
1398 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1399 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1400 94df27fd Paul Brook
/* User-mode variant: guest code addresses map directly, so invalidate
   the translated code covering 'pc' without a page-table lookup. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1404 94df27fd Paul Brook
#else
1405 d720b93d bellard
/* Softmmu variant: translate the virtual pc to a ram address through
   the physical page table, then invalidate the translated code that
   covers it. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t phys;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *desc;

    phys = cpu_get_phys_page_debug(env, pc);
    desc = phys_page_find(phys >> TARGET_PAGE_BITS);
    /* unmapped pages get the unassigned-I/O marker */
    pd = desc ? desc->phys_offset : IO_MEM_UNASSIGNED;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
1422 c27004ec bellard
#endif
1423 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1424 d720b93d bellard
1425 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
1426 c527ee8f Paul Brook
/* Watchpoints are not supported in user-mode emulation: removing all
   of them is a no-op. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}
1430 c527ee8f Paul Brook
1431 c527ee8f Paul Brook
/* Watchpoints are not supported in user-mode emulation. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1436 c527ee8f Paul Brook
#else
1437 6658ffb8 pbrook
/* Add a watchpoint.  */
1438 a1d1bb31 aliguori
/* Add a watchpoint on [addr, addr+len).  len must be a power of two in
   {1,2,4,8} and addr must be len-aligned.  On success the new entry is
   optionally returned through *watchpoint.  Returns 0 or -EINVAL. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *w;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }

    w = qemu_malloc(sizeof(*w));
    w->vaddr = addr;
    w->len_mask = len_mask;
    w->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, w, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, w, entry);
    }

    /* force a TLB refill so accesses to this page trap */
    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = w;
    }
    return 0;
}
1468 6658ffb8 pbrook
1469 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1470 a1d1bb31 aliguori
/* Remove the watchpoint matching (addr, len, flags), ignoring any
   transient BP_WATCHPOINT_HIT bit.  Returns 0 or -ENOENT. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *w;

    QTAILQ_FOREACH(w, &env->watchpoints, entry) {
        if (w->vaddr != addr || w->len_mask != len_mask) {
            continue;
        }
        if (flags != (w->flags & ~BP_WATCHPOINT_HIT)) {
            continue;
        }
        cpu_watchpoint_remove_by_ref(env, w);
        return 0;
    }
    return -ENOENT;
}
1485 6658ffb8 pbrook
1486 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1487 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    /* drop any cached TLB entry for the watched page */
    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
1495 a1d1bb31 aliguori
1496 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1497 a1d1bb31 aliguori
/* Remove every watchpoint whose flags intersect 'mask'. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *w, *tmp;

    /* SAFE variant: removal frees the current element */
    QTAILQ_FOREACH_SAFE(w, &env->watchpoints, entry, tmp) {
        if (w->flags & mask) {
            cpu_watchpoint_remove_by_ref(env, w);
        }
    }
}
1506 c527ee8f Paul Brook
#endif
1507 7d03f82f edgar_igl
1508 a1d1bb31 aliguori
/* Add a breakpoint.  */
1509 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1510 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1511 4c3a88a2 bellard
{
1512 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1513 c0ce998e aliguori
    CPUBreakpoint *bp;
1514 3b46e624 ths
1515 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1516 4c3a88a2 bellard
1517 a1d1bb31 aliguori
    bp->pc = pc;
1518 a1d1bb31 aliguori
    bp->flags = flags;
1519 a1d1bb31 aliguori
1520 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1521 c0ce998e aliguori
    if (flags & BP_GDB)
1522 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1523 c0ce998e aliguori
    else
1524 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1525 3b46e624 ths
1526 d720b93d bellard
    breakpoint_invalidate(env, pc);
1527 a1d1bb31 aliguori
1528 a1d1bb31 aliguori
    if (breakpoint)
1529 a1d1bb31 aliguori
        *breakpoint = bp;
1530 4c3a88a2 bellard
    return 0;
1531 4c3a88a2 bellard
#else
1532 a1d1bb31 aliguori
    return -ENOSYS;
1533 4c3a88a2 bellard
#endif
1534 4c3a88a2 bellard
}
1535 4c3a88a2 bellard
1536 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1537 a1d1bb31 aliguori
/* Remove the breakpoint matching (pc, flags).  Returns 0, -ENOENT if
   no such breakpoint exists, or -ENOSYS without ICE support. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc != pc || bp->flags != flags) {
            continue;
        }
        cpu_breakpoint_remove_by_ref(env, bp);
        return 0;
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
1553 7d03f82f edgar_igl
1554 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1555 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    /* retranslate the code at the old breakpoint address */
    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1565 a1d1bb31 aliguori
1566 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1567 a1d1bb31 aliguori
/* Remove every breakpoint whose flags intersect 'mask'. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *tmp;

    /* SAFE variant: removal frees the current element */
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, tmp) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}
1578 4c3a88a2 bellard
1579 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1580 c33a346e bellard
   CPU loop after each instruction */
1581 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled == enabled) {
        return;
    }
    env->singlestep_enabled = enabled;
    if (kvm_enabled()) {
        kvm_update_guest_debug(env, 0);
    } else {
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
1596 c33a346e bellard
1597 34865134 bellard
/* enable or disable low levels log */
1598 34865134 bellard
void cpu_set_log(int log_flags)
1599 34865134 bellard
{
1600 34865134 bellard
    loglevel = log_flags;
1601 34865134 bellard
    if (loglevel && !logfile) {
1602 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1603 34865134 bellard
        if (!logfile) {
1604 34865134 bellard
            perror(logfilename);
1605 34865134 bellard
            _exit(1);
1606 34865134 bellard
        }
1607 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1608 9fa3e853 bellard
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1609 9fa3e853 bellard
        {
1610 b55266b5 blueswir1
            static char logfile_buf[4096];
1611 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1612 9fa3e853 bellard
        }
1613 bf65f53f Filip Navara
#elif !defined(_WIN32)
1614 bf65f53f Filip Navara
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1615 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1616 9fa3e853 bellard
#endif
1617 e735b91c pbrook
        log_append = 1;
1618 e735b91c pbrook
    }
1619 e735b91c pbrook
    if (!loglevel && logfile) {
1620 e735b91c pbrook
        fclose(logfile);
1621 e735b91c pbrook
        logfile = NULL;
1622 34865134 bellard
    }
1623 34865134 bellard
}
1624 34865134 bellard
1625 34865134 bellard
/* Change the log file name; any open log is closed here and re-opened
   by cpu_set_log() with the current level. */
void cpu_set_log_filename(const char *filename)
{
    /* NOTE(review): the previous logfilename string is overwritten
       without being freed */
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
1634 c33a346e bellard
1635 3098dba0 aurel32
/* Unchain the TB the CPU is currently executing (if any) so that the
   execution loop regains control at the next TB boundary. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    /* serialize concurrent unlink attempts */
    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1654 3098dba0 aurel32
1655 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1656 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1657 3098dba0 aurel32
{
1658 3098dba0 aurel32
    int old_mask;
1659 be214e6c aurel32
1660 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1661 68a79315 bellard
    env->interrupt_request |= mask;
1662 3098dba0 aurel32
1663 8edac960 aliguori
#ifndef CONFIG_USER_ONLY
1664 8edac960 aliguori
    /*
1665 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1666 8edac960 aliguori
     * case its halted.
1667 8edac960 aliguori
     */
1668 8edac960 aliguori
    if (!qemu_cpu_self(env)) {
1669 8edac960 aliguori
        qemu_cpu_kick(env);
1670 8edac960 aliguori
        return;
1671 8edac960 aliguori
    }
1672 8edac960 aliguori
#endif
1673 8edac960 aliguori
1674 2e70f6ef pbrook
    if (use_icount) {
1675 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1676 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1677 2e70f6ef pbrook
        if (!can_do_io(env)
1678 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1679 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1680 2e70f6ef pbrook
        }
1681 2e70f6ef pbrook
#endif
1682 2e70f6ef pbrook
    } else {
1683 3098dba0 aurel32
        cpu_unlink_tb(env);
1684 ea041c0e bellard
    }
1685 ea041c0e bellard
}
1686 ea041c0e bellard
1687 b54ad049 bellard
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1691 b54ad049 bellard
1692 3098dba0 aurel32
/* Request that the CPU's execution loop exit, unchaining the current
   TB so the request is seen as soon as possible. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1697 3098dba0 aurel32
1698 c7cd6a37 blueswir1
/* Table mapping "-d" log option names to their mask bits and help
   text; terminated by a zero-mask entry (see cpu_str_to_log_mask). */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1729 f193c797 bellard
1730 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1731 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1732 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1733 f6f3fbca Michael S. Tsirkin
1734 f6f3fbca Michael S. Tsirkin
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1735 f6f3fbca Michael S. Tsirkin
                                  ram_addr_t size,
1736 f6f3fbca Michael S. Tsirkin
                                  ram_addr_t phys_offset)
1737 f6f3fbca Michael S. Tsirkin
{
1738 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1739 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1740 f6f3fbca Michael S. Tsirkin
        client->set_memory(client, start_addr, size, phys_offset);
1741 f6f3fbca Michael S. Tsirkin
    }
1742 f6f3fbca Michael S. Tsirkin
}
1743 f6f3fbca Michael S. Tsirkin
1744 f6f3fbca Michael S. Tsirkin
/* Ask every registered client to sync its dirty bitmap for the range.
   Stops at (and returns) the first negative client result, else 0. */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *c;
    int ret;

    QLIST_FOREACH(c, &memory_client_list, list) {
        ret = c->sync_dirty_bitmap(c, start, end);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}
1755 f6f3fbca Michael S. Tsirkin
1756 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1757 f6f3fbca Michael S. Tsirkin
{
1758 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1759 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1760 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1761 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1762 f6f3fbca Michael S. Tsirkin
            return r;
1763 f6f3fbca Michael S. Tsirkin
    }
1764 f6f3fbca Michael S. Tsirkin
    return 0;
1765 f6f3fbca Michael S. Tsirkin
}
1766 f6f3fbca Michael S. Tsirkin
1767 5cd2c5b6 Richard Henderson
/* Recursively walk one subtree of the physical page radix table,
   reporting every assigned page to 'client'.  'level' 0 means *lp is a
   leaf array of PhysPageDesc; otherwise it is an array of child
   pointers. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* only mapped pages are reported */
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1790 f6f3fbca Michael S. Tsirkin
1791 f6f3fbca Michael S. Tsirkin
/* Replay all current physical page mappings to 'client' by walking
   every top-level slot of the l1_phys_map radix table. */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        /* Fix: walk slot i ("l1_phys_map + i"); the previous
           "l1_phys_map + 1" replayed the same single slot P_L1_SIZE
           times and never visited slot 0 or slots 2..P_L1_SIZE-1, so
           new clients saw an incomplete memory map. */
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}
1799 f6f3fbca Michael S. Tsirkin
1800 f6f3fbca Michael S. Tsirkin
/* Register a physical-memory client and immediately replay the current
   page mappings to it so it starts with a complete view. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1805 f6f3fbca Michael S. Tsirkin
1806 f6f3fbca Michael S. Tsirkin
/* Unlink a physical-memory client; the caller keeps ownership of the
   client structure. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1810 f6f3fbca Michael S. Tsirkin
#endif
1811 f6f3fbca Michael S. Tsirkin
1812 f193c797 bellard
/* Return nonzero iff the first n characters of s1 are exactly the
   NUL-terminated string s2 (length must match too). */
static int cmp1(const char *s1, int n, const char *s2)
{
    size_t want = strlen(s2);

    if (want != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, want) == 0;
}
1818 3b46e624 ths
1819 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
1820 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1821 f193c797 bellard
{
1822 c7cd6a37 blueswir1
    const CPULogItem *item;
1823 f193c797 bellard
    int mask;
1824 f193c797 bellard
    const char *p, *p1;
1825 f193c797 bellard
1826 f193c797 bellard
    p = str;
1827 f193c797 bellard
    mask = 0;
1828 f193c797 bellard
    for(;;) {
1829 f193c797 bellard
        p1 = strchr(p, ',');
1830 f193c797 bellard
        if (!p1)
1831 f193c797 bellard
            p1 = p + strlen(p);
1832 8e3a9fd2 bellard
        if(cmp1(p,p1-p,"all")) {
1833 8e3a9fd2 bellard
                for(item = cpu_log_items; item->mask != 0; item++) {
1834 8e3a9fd2 bellard
                        mask |= item->mask;
1835 8e3a9fd2 bellard
                }
1836 8e3a9fd2 bellard
        } else {
1837 f193c797 bellard
        for(item = cpu_log_items; item->mask != 0; item++) {
1838 f193c797 bellard
            if (cmp1(p, p1 - p, item->name))
1839 f193c797 bellard
                goto found;
1840 f193c797 bellard
        }
1841 f193c797 bellard
        return 0;
1842 8e3a9fd2 bellard
        }
1843 f193c797 bellard
    found:
1844 f193c797 bellard
        mask |= item->mask;
1845 f193c797 bellard
        if (*p1 != ',')
1846 f193c797 bellard
            break;
1847 f193c797 bellard
        p = p1 + 1;
1848 f193c797 bellard
    }
1849 f193c797 bellard
    return mask;
1850 f193c797 bellard
}
1851 ea041c0e bellard
1852 7501267e bellard
/* Report a fatal emulation error: print a printf-style message plus a
   CPU register dump to stderr (and to the qemu log if enabled), then
   abort().  This function never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* 'ap' is consumed by the stderr vfprintf below; 'ap2' is a copy
       for the log path, since a va_list cannot be walked twice. */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    /* x86 gets the extra FPU/condition-code dump flags. */
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT disposition so abort() really
           terminates the process even if the guest installed a handler. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1891 7501267e bellard
1892 c5be9f08 ths
/* Create a new CPUState that is a copy of 'env', preserving the new
   CPU's own chaining pointer and index, and cloning all break- and
   watchpoints onto the copy.  Returns the new CPUState. */
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    /* BUG FIX: re-initialize the list heads of the COPY, not of the
       source.  The memcpy above made new_env's queue heads alias env's
       lists; previously this code did QTAILQ_INIT(&env->...), which
       emptied the source CPU's lists and then iterated those now-empty
       lists, so nothing was ever cloned and the original CPU lost its
       break/watchpoints. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* wp->len_mask stores the inverted length mask; recover the
           byte length for re-insertion. */
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1925 c5be9f08 ths
1926 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1927 0124311e bellard
1928 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page; the hash buckets for the preceding
       page are cleared as well. */
    target_ulong pages[2];
    int k;

    pages[0] = addr - TARGET_PAGE_SIZE;
    pages[1] = addr;
    for (k = 0; k < 2; k++) {
        unsigned int h = tb_jmp_cache_hash_page(pages[k]);
        memset(&env->tb_jmp_cache[h], 0,
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    }
}
1942 5c751e99 edgar_igl
1943 08738984 Igor Kovalenko
/* The canonical "invalid" TLB entry: every address field is -1 so no
   masked comparison can match it.  Copied wholesale into slots being
   flushed (see tlb_flush / tlb_flush_entry). */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1949 08738984 Igor Kovalenko
1950 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1951 ee8b7021 bellard
   implemented yet) */
1952 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1953 33417e70 bellard
{
1954 33417e70 bellard
    int i;
1955 0124311e bellard
1956 9fa3e853 bellard
#if defined(DEBUG_TLB)
1957 9fa3e853 bellard
    printf("tlb_flush:\n");
1958 9fa3e853 bellard
#endif
1959 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1960 0124311e bellard
       links while we are modifying them */
1961 0124311e bellard
    env->current_tb = NULL;
1962 0124311e bellard
1963 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1964 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1965 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1966 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1967 cfde4bd9 Isaku Yamahata
        }
1968 33417e70 bellard
    }
1969 9fa3e853 bellard
1970 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1971 9fa3e853 bellard
1972 d4c430a8 Paul Brook
    env->tlb_flush_addr = -1;
1973 d4c430a8 Paul Brook
    env->tlb_flush_mask = 0;
1974 e3db7226 bellard
    tlb_flush_count++;
1975 33417e70 bellard
}
1976 33417e70 bellard
1977 274da6b2 bellard
/* Invalidate one TLB entry if any of its address fields matches 'addr'
   (ignoring the in-page bits and the TLB_INVALID flag). */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    const target_ulong mask = TARGET_PAGE_MASK | TLB_INVALID_MASK;

    if (addr == (tlb_entry->addr_read & mask) ||
        addr == (tlb_entry->addr_write & mask) ||
        addr == (tlb_entry->addr_code & mask)) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1988 61382a50 bellard
1989 2e12669a bellard
/* Invalidate the TLB entries mapping the page of virtual address
   'addr' in every MMU mode, and drop the matching jump cache lines. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int mmu_idx, slot;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    slot = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][slot], addr);
    }

    tlb_flush_jmp_cache(env, addr);
}
2018 9fa3e853 bellard
2019 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG for this one page makes subsequent
       writes take the not-dirty slow path, where self-modifying code
       can be detected. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2027 9fa3e853 bellard
2028 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* 'env' and 'vaddr' are unused here; setting the page's
       CODE_DIRTY_FLAG is sufficient to lift the write protection. */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2035 1ccde1cb bellard
2036 5fafdf24 ths
/* If this TLB entry is a writable RAM mapping whose backing host
   address falls inside [start, start+length), mark it TLB_NOTDIRTY so
   the next write goes through the dirty-tracking slow path. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
    if ((addr - start) < length) {
        tlb_entry->addr_write =
            (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
    }
}
2047 1ccde1cb bellard
2048 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
2049 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2050 0a962c02 bellard
                                     int dirty_flags)
2051 1ccde1cb bellard
{
2052 1ccde1cb bellard
    CPUState *env;
2053 4f2ac237 bellard
    unsigned long length, start1;
2054 f7c11b53 Yoshiaki Tamura
    int i;
2055 1ccde1cb bellard
2056 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
2057 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
2058 1ccde1cb bellard
2059 1ccde1cb bellard
    length = end - start;
2060 1ccde1cb bellard
    if (length == 0)
2061 1ccde1cb bellard
        return;
2062 f7c11b53 Yoshiaki Tamura
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2063 f23db169 bellard
2064 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
2065 1ccde1cb bellard
       when accessing the range */
2066 5579c7f3 pbrook
    start1 = (unsigned long)qemu_get_ram_ptr(start);
2067 5579c7f3 pbrook
    /* Chek that we don't span multiple blocks - this breaks the
2068 5579c7f3 pbrook
       address comparisons below.  */
2069 5579c7f3 pbrook
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2070 5579c7f3 pbrook
            != (end - 1) - start) {
2071 5579c7f3 pbrook
        abort();
2072 5579c7f3 pbrook
    }
2073 5579c7f3 pbrook
2074 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2075 cfde4bd9 Isaku Yamahata
        int mmu_idx;
2076 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2077 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
2078 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2079 cfde4bd9 Isaku Yamahata
                                      start1, length);
2080 cfde4bd9 Isaku Yamahata
        }
2081 6a00d601 bellard
    }
2082 1ccde1cb bellard
}
2083 1ccde1cb bellard
2084 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2085 74576198 aliguori
{
2086 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2087 74576198 aliguori
    in_migration = enable;
2088 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2089 f6f3fbca Michael S. Tsirkin
    return ret;
2090 74576198 aliguori
}
2091 74576198 aliguori
2092 74576198 aliguori
/* Return non-zero while dirty memory tracking is enabled
   (mirrors the flag set by cpu_physical_memory_set_dirty_tracking). */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2096 74576198 aliguori
2097 c227f099 Anthony Liguori
/* Ask registered clients to synchronize the dirty bitmap for the
   physical address range [start_addr, end_addr].  Returns the
   notification result. */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    return cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
}
2105 2bec46dc aliguori
2106 3a7d929e bellard
/* If this entry maps RAM whose page is no longer dirty, flag it
   TLB_NOTDIRTY so writes fall back to the dirty-tracking path. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
        + tlb_entry->addend);
    ram_addr = qemu_ram_addr_from_host(p);
    if (!cpu_physical_memory_is_dirty(ram_addr)) {
        tlb_entry->addr_write |= TLB_NOTDIRTY;
    }
}
2120 3a7d929e bellard
2121 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2122 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2123 3a7d929e bellard
{
2124 3a7d929e bellard
    int i;
2125 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2126 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2127 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2128 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2129 cfde4bd9 Isaku Yamahata
    }
2130 3a7d929e bellard
}
2131 3a7d929e bellard
2132 0f459d16 pbrook
/* Strip the TLB_NOTDIRTY flag from this entry if it maps 'vaddr'. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
2137 1ccde1cb bellard
2138 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2139 0f459d16 pbrook
   so that it is no longer dirty */
2140 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2141 1ccde1cb bellard
{
2142 1ccde1cb bellard
    int i;
2143 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2144 1ccde1cb bellard
2145 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2146 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2147 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2148 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2149 9fa3e853 bellard
}
2150 9fa3e853 bellard
2151 d4c430a8 Paul Brook
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    /* Alignment mask for a large page of 'size' bytes; size is assumed
       to be a power of two (enforced by the callers' page machinery —
       TODO confirm). */
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No large page tracked yet: record this one exactly. */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    /* Widen the mask until the tracked address and the new page fall
       inside the same aligned block. */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2173 d4c430a8 Paul Brook
2174 d4c430a8 Paul Brook
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;          /* physical page descriptor: offset + flags */
    unsigned int index;        /* TLB slot for this vaddr */
    target_ulong address;      /* value stored in addr_read/addr_write */
    target_ulong code_address; /* value stored in addr_code */
    unsigned long addend;      /* host-minus-guest address delta */
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;  /* I/O index + offset for the slow path */

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Track large-page coverage so tlb_flush_page can force a full
           flush when part of the range is invalidated. */
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    /* Fields for missing permissions are set to -1 so they can never
       match a masked lookup (cf. s_cputlb_empty_entry). */
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: route the first write through the
               dirty-tracking slow path. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2276 9fa3e853 bellard
2277 0124311e bellard
#else
2278 0124311e bellard
2279 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
{
    /* User-mode emulation (CONFIG_USER_ONLY): no softmmu TLB exists,
       so a flush is a no-op. */
}
2282 0124311e bellard
2283 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    /* User-mode emulation (CONFIG_USER_ONLY): no softmmu TLB exists,
       so a per-page flush is a no-op. */
}
2286 0124311e bellard
2287 edf8e2af Mika Westerberg
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator for the walk: consecutive pages sharing the same
   protection are coalesced into one [start, end) region before 'fn'
   is invoked. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked once per region */
    void *priv;                 /* opaque argument forwarded to fn */
    unsigned long start;        /* start of the open region, -1ul if none */
    int prot;                   /* protection flags of the open region */
};
2299 5cd2c5b6 Richard Henderson
2300 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2301 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2302 5cd2c5b6 Richard Henderson
{
2303 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2304 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2305 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2306 5cd2c5b6 Richard Henderson
            return rc;
2307 5cd2c5b6 Richard Henderson
        }
2308 5cd2c5b6 Richard Henderson
    }
2309 5cd2c5b6 Richard Henderson
2310 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2311 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2312 5cd2c5b6 Richard Henderson
2313 5cd2c5b6 Richard Henderson
    return 0;
2314 5cd2c5b6 Richard Henderson
}
2315 5cd2c5b6 Richard Henderson
2316 5cd2c5b6 Richard Henderson
/* Recursive helper: walk one node of the page-table radix tree.
   'base' is the guest address covered by *lp, 'level' the number of
   interior levels below this node (0 means *lp is a leaf PageDesc
   array).  Returns the first non-zero callback result, else 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Absent subtree: everything under it is unmapped (prot 0),
           which closes any open region. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: emit the pending region and
                   start a new one at this page. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* Each child covers L2_BITS*level + TARGET_PAGE_BITS bits. */
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2353 5cd2c5b6 Richard Henderson
2354 5cd2c5b6 Richard Henderson
/* Walk every mapped region of the guest address space, invoking 'fn'
   once per maximal run of pages with identical protection.  Stops and
   returns the first non-zero value 'fn' returns, else 0. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data = {
        .fn = fn,
        .priv = priv,
        .start = -1ul,   /* no region open yet */
        .prot = 0,
    };
    unsigned long i;

    /* Descend from each top-level radix table entry. */
    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc) {
            return rc;
        }
    }

    /* Flush the final pending region. */
    return walk_memory_regions_end(&data, 0, 0);
}
2374 edf8e2af Mika Westerberg
2375 b480d9b7 Paul Brook
/* walk_memory_regions() callback: print one region as a /proc/maps
   style line ("start-end size rwx") to the FILE* passed via 'priv'. */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = priv;

    fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        (prot & PAGE_READ) ? 'r' : '-',
        (prot & PAGE_WRITE) ? 'w' : '-',
        (prot & PAGE_EXEC) ? 'x' : '-');

    return 0;
}
2389 edf8e2af Mika Westerberg
2390 edf8e2af Mika Westerberg
/* dump memory mappings */
2391 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2392 edf8e2af Mika Westerberg
{
2393 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2394 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2395 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2396 33417e70 bellard
}
2397 33417e70 bellard
2398 53a5960a pbrook
/* Return the PAGE_* flags of the page containing 'address', or 0 if
   the page has no descriptor (i.e. is unmapped). */
int page_get_flags(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);

    return p ? p->flags : 0;
}
2407 9fa3e853 bellard
2408 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Record that the page was originally writable so write
           protection applied for code pages can be undone later. */
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        /* '1' = allocate the descriptor if it does not exist yet. */
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2445 33417e70 bellard
2446 3d97b40b ths
/* Check that every guest page in [start, start + len) grants the access
 * rights requested in 'flags' (a mask of PAGE_READ / PAGE_WRITE).
 * Pages that are only read-only because they contain translated code
 * are unprotected on demand via page_unprotect().
 * Returns 0 if the whole range passes, -1 otherwise.
 */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    /* An empty range trivially satisfies any access check.  Previously
       this fell through to the wrap test and wrongly returned -1 for
       any non-zero start.  */
    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* Bug fix: the old code returned 0 here, so only the first
               page of a multi-page range was ever checked for write
               access.  Keep iterating over the remaining pages.  */
        }
    }
    return 0;
}
2492 3d97b40b ths
2493 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        /* Unknown page: not a fault we can handle.  */
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* A host page may cover several target pages when the host
           page size is larger; restore write access to all of them.  */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        /* Accumulate the union of the target-page flags so mprotect
           gets the widest protection any of them needs.  */
        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2540 9fa3e853 bellard
2541 6a00d601 bellard
/* User-mode emulation has no softmmu TLB, so dirty tracking is a no-op
   here; the softmmu implementation lives in the !CONFIG_USER_ONLY part
   of this file.  */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2545 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2546 9fa3e853 bellard
2547 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2548 8da3ff18 pbrook
2549 c04b2b78 Paul Brook
/* Byte offset of 'addr' within its target page.  */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* State for a target page whose bytes are backed by more than one
   memory region.  Per byte offset there are handler tables indexed by
   access-size exponent (0 = byte, 1 = word, 2 = long); presumably the
   extra [2] dimension on opaque/region_offset separates read (0) from
   write (1) -- confirm against subpage_register below.  */
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
2557 c04b2b78 Paul Brook
2558 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2559 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset);
2560 c227f099 Anthony Liguori
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2561 c227f099 Anthony Liguori
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2562 db7b5426 blueswir1
/* Compute the sub-page byte range [start_addr2, end_addr2] of the page
   containing 'addr' that is covered by the registration
   [start_addr, start_addr + orig_size), and set need_subpage when that
   coverage is partial (so a subpage container is required).
   start_addr2/end_addr2/need_subpage are output lvalues; orig_size is
   read from the enclosing scope and the end_addr parameter is unused. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2581 db7b5426 blueswir1
2582 8f2498f9 Michael S. Tsirkin
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;   /* read implicitly by CHECK_SUBPAGE */
    void *subpage;

    /* Let interested clients (e.g. KVM) know about the mapping change.  */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to overlay a subpage.  */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    /* Convert the full page into a subpage container
                       that starts out backed by the old region.  */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage; reuse its container.  */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM pages advance phys_offset per page; pure IO
                   pages share a single offset.  */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* First mapping for this page: allocate its descriptor.  */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2670 33417e70 bellard
2671 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2672 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2673 ba863458 bellard
{
2674 ba863458 bellard
    PhysPageDesc *p;
2675 ba863458 bellard
2676 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2677 ba863458 bellard
    if (!p)
2678 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2679 ba863458 bellard
    return p->phys_offset;
2680 ba863458 bellard
}
2681 ba863458 bellard
2682 c227f099 Anthony Liguori
/* Mark [addr, addr + size) as a coalesced-MMIO region.  This is a
   KVM-only optimisation, so it is a no-op under TCG.  */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_coalesce_mmio_region(addr, size);
}
2687 f65ed4c1 aliguori
2688 c227f099 Anthony Liguori
/* Undo qemu_register_coalesced_mmio() for [addr, addr + size).
   No-op when KVM is not in use.  */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_uncoalesce_mmio_region(addr, size);
}
2693 f65ed4c1 aliguori
2694 62a2744c Sheng Yang
/* Drain any MMIO writes KVM has buffered for coalesced regions.
   No-op when KVM is not in use.  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2699 62a2744c Sheng Yang
2700 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2701 c902760f Marcelo Tosatti
2702 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2703 c902760f Marcelo Tosatti
2704 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
2705 c902760f Marcelo Tosatti
2706 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
2707 c902760f Marcelo Tosatti
{
2708 c902760f Marcelo Tosatti
    struct statfs fs;
2709 c902760f Marcelo Tosatti
    int ret;
2710 c902760f Marcelo Tosatti
2711 c902760f Marcelo Tosatti
    do {
2712 c902760f Marcelo Tosatti
            ret = statfs(path, &fs);
2713 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);
2714 c902760f Marcelo Tosatti
2715 c902760f Marcelo Tosatti
    if (ret != 0) {
2716 6adc0549 Michael Tokarev
            perror(path);
2717 c902760f Marcelo Tosatti
            return 0;
2718 c902760f Marcelo Tosatti
    }
2719 c902760f Marcelo Tosatti
2720 c902760f Marcelo Tosatti
    if (fs.f_type != HUGETLBFS_MAGIC)
2721 c902760f Marcelo Tosatti
            fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2722 c902760f Marcelo Tosatti
2723 c902760f Marcelo Tosatti
    return fs.f_bsize;
2724 c902760f Marcelo Tosatti
}
2725 c902760f Marcelo Tosatti
2726 c902760f Marcelo Tosatti
/* Allocate 'memory' bytes of guest RAM backed by a (huge-page) file
 * created under 'path'.  The backing file is unlinked immediately, so
 * it lives only as long as the mapping.  Returns the mapped area, or
 * NULL on any failure (caller falls back or exits).
 * NOTE(review): on success 'fd' is neither closed nor returned; the
 * mapping keeps the pages alive, but the descriptor itself appears to
 * leak -- confirm whether later code relies on it staying open. */
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Requests smaller than one huge page cannot be satisfied here.  */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink right away: the file persists via fd/mapping only.  */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages.  */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    return area;
}
2791 c902760f Marcelo Tosatti
#endif
2792 c902760f Marcelo Tosatti
2793 c227f099 Anthony Liguori
/* Allocate a new block of guest RAM of (page-aligned) 'size' bytes and
 * return its ram_addr_t offset.  Host memory comes from -mem-path
 * (hugetlbfs) when configured, otherwise from an anonymous mapping or
 * qemu_vmalloc.  The dirty bitmap is grown and the new range marked
 * fully dirty.  Exits on allocation failure. */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                                PROT_EXEC|PROT_READ|PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        /* Allow KSM to merge identical guest pages.  */
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    /* New blocks are carved off the end of the ram address space and
       pushed onto the head of the block list.  */
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    /* Extend the dirty bitmap (one byte of flags per target page) and
       mark the whole new range dirty.  */
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2840 e9a1ab19 bellard
2841 c227f099 Anthony Liguori
/* Free the RAM block at 'addr'.  Intentionally a stub: blocks are
   never reclaimed yet, so this leaks by design for now.  */
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
2845 e9a1ab19 bellard
2846 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;   /* slot that points at 'prev' (trails by one) */
    RAMBlock *block;

    /* Linear search for the block whose [offset, offset+length) range
       contains addr.  */
    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
          prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move the found entry toward the head of the list.
       NOTE(review): because prevp points at the slot holding 'prev',
       this transposes 'block' with its predecessor (one step toward
       the front) rather than moving it all the way to the head;
       repeated lookups still migrate hot blocks forward.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
2882 dc828ca1 pbrook
2883 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
2884 5579c7f3 pbrook
   (typically a TLB entry) back to a ram offset.  */
2885 c227f099 Anthony Liguori
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2886 5579c7f3 pbrook
{
2887 94a6b54f pbrook
    RAMBlock *prev;
2888 94a6b54f pbrook
    RAMBlock *block;
2889 94a6b54f pbrook
    uint8_t *host = ptr;
2890 94a6b54f pbrook
2891 94a6b54f pbrook
    prev = NULL;
2892 94a6b54f pbrook
    block = ram_blocks;
2893 94a6b54f pbrook
    while (block && (block->host > host
2894 94a6b54f pbrook
                     || block->host + block->length <= host)) {
2895 94a6b54f pbrook
        prev = block;
2896 94a6b54f pbrook
        block = block->next;
2897 94a6b54f pbrook
    }
2898 94a6b54f pbrook
    if (!block) {
2899 94a6b54f pbrook
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2900 94a6b54f pbrook
        abort();
2901 94a6b54f pbrook
    }
2902 94a6b54f pbrook
    return block->offset + (host - block->host);
2903 5579c7f3 pbrook
}
2904 5579c7f3 pbrook
2905 c227f099 Anthony Liguori
/* Byte read from unassigned physical memory: optionally logged,
   raises a target-specific unassigned-access fault on some CPUs,
   and reads as zero.  */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
2915 e18231a3 blueswir1
2916 c227f099 Anthony Liguori
/* 16-bit read from unassigned physical memory; see unassigned_mem_readb.  */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2926 e18231a3 blueswir1
2927 c227f099 Anthony Liguori
/* 32-bit read from unassigned physical memory; see unassigned_mem_readb.  */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
2937 33417e70 bellard
2938 c227f099 Anthony Liguori
/* Byte write to unassigned physical memory: optionally logged, raises
   a target-specific fault on some CPUs, otherwise discarded.  */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
2947 e18231a3 blueswir1
2948 c227f099 Anthony Liguori
/* 16-bit write to unassigned physical memory; see unassigned_mem_writeb.  */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2957 e18231a3 blueswir1
2958 c227f099 Anthony Liguori
/* 32-bit write to unassigned physical memory; see unassigned_mem_writeb.  */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
2967 33417e70 bellard
2968 d60efc6b Blue Swirl
/* Read dispatch table for unassigned memory, indexed by access-size
   exponent: 0 = byte, 1 = 16-bit, 2 = 32-bit.  */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
2973 33417e70 bellard
2974 d60efc6b Blue Swirl
/* Write dispatch table for unassigned memory, indexed by access-size
   exponent: 0 = byte, 1 = 16-bit, 2 = 32-bit.  */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
2979 33417e70 bellard
2980 c227f099 Anthony Liguori
/* Byte write to a RAM page routed through IO_MEM_NOTDIRTY: if the page
   may contain translated code, invalidate the affected TBs before the
   store, then perform the store and set the page's dirty flags.  */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Page held translated code: flush the 1-byte span, then
           re-read the flags that invalidation may have updated.  */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    /* Set every dirty bit except CODE_DIRTY_FLAG.  */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
2999 9fa3e853 bellard
3000 c227f099 Anthony Liguori
/* 16-bit variant of notdirty_mem_writeb: invalidate any translated
   code in the 2-byte span, store, then update the dirty flags.  */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    /* Set every dirty bit except CODE_DIRTY_FLAG.  */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3019 9fa3e853 bellard
3020 c227f099 Anthony Liguori
/* 32-bit variant of notdirty_mem_writeb: invalidate code on the page,
   store, then update the page's dirty flags. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* 4 = access size in bytes. */
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3039 9fa3e853 bellard
3040 d60efc6b Blue Swirl
/* Read table for write-only io-mem entries (ROM, notdirty); the NULL
   slots are intentional and must never be called. */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
3045 9fa3e853 bellard
3046 d60efc6b Blue Swirl
/* Write dispatch table (byte/word/long) for the IO_MEM_NOTDIRTY slot. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3051 1ccde1cb bellard
3052 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access that landed
       in the watchpoint-trapping TLB entry. */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Hit when the access range and the watchpoint range overlap
           (checked via the two length masks) and the access type
           (read/write flags) matches. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Roll CPU state back to the faulting instruction, drop
                   the TB, and either stop now or regenerate a
                   single-instruction TB so the access completes first. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3096 0f459d16 pbrook
3097 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
3098 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
3099 6658ffb8 pbrook
   phys routines.  */
3100 c227f099 Anthony Liguori
/* Byte read through a watchpoint-trapped page: report a possible hit,
   then fall through to the normal physical read. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    int page_ofs = addr & ~TARGET_PAGE_MASK;

    check_watchpoint(page_ofs, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
3105 6658ffb8 pbrook
3106 c227f099 Anthony Liguori
/* 16-bit read through a watchpoint-trapped page. */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    int page_ofs = addr & ~TARGET_PAGE_MASK;

    check_watchpoint(page_ofs, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3111 6658ffb8 pbrook
3112 c227f099 Anthony Liguori
/* 32-bit read through a watchpoint-trapped page. */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    int page_ofs = addr & ~TARGET_PAGE_MASK;

    check_watchpoint(page_ofs, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3117 6658ffb8 pbrook
3118 c227f099 Anthony Liguori
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3119 6658ffb8 pbrook
                             uint32_t val)
3120 6658ffb8 pbrook
{
3121 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3122 6658ffb8 pbrook
    stb_phys(addr, val);
3123 6658ffb8 pbrook
}
3124 6658ffb8 pbrook
3125 c227f099 Anthony Liguori
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3126 6658ffb8 pbrook
                             uint32_t val)
3127 6658ffb8 pbrook
{
3128 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3129 6658ffb8 pbrook
    stw_phys(addr, val);
3130 6658ffb8 pbrook
}
3131 6658ffb8 pbrook
3132 c227f099 Anthony Liguori
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3133 6658ffb8 pbrook
                             uint32_t val)
3134 6658ffb8 pbrook
{
3135 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3136 6658ffb8 pbrook
    stl_phys(addr, val);
3137 6658ffb8 pbrook
}
3138 6658ffb8 pbrook
3139 d60efc6b Blue Swirl
/* Read dispatch table (byte/word/long) for the watchpoint io-mem slot. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3144 6658ffb8 pbrook
3145 d60efc6b Blue Swirl
/* Write dispatch table (byte/word/long) for the watchpoint io-mem slot. */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
3150 6658ffb8 pbrook
3151 c227f099 Anthony Liguori
/* Common sub-page read path: look up the handler registered for this
   sub-page slot and access width (len: 0=byte, 1=word, 2=long) and
   invoke it with the slot's opaque pointer and region offset applied. */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    /* Index 0 selects the read side of the opaque/region_offset tables. */
    return (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                        addr + mmio->region_offset[idx][0][len]);
}
3167 db7b5426 blueswir1
3168 c227f099 Anthony Liguori
/* Common sub-page write path: dispatch to the handler registered for
   this sub-page slot and access width (len: 0=byte, 1=word, 2=long). */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    /* Index 1 selects the write side of the opaque/region_offset tables. */
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}
3182 db7b5426 blueswir1
3183 c227f099 Anthony Liguori
/* Byte read entry point for a sub-page region. */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    subpage_t *mmio = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    /* Width index 0 = byte access. */
    return subpage_readlen(mmio, addr, 0);
}
3191 db7b5426 blueswir1
3192 c227f099 Anthony Liguori
/* Byte write entry point for a sub-page region. */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_t *mmio = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    /* Width index 0 = byte access. */
    subpage_writelen(mmio, addr, value, 0);
}
3200 db7b5426 blueswir1
3201 c227f099 Anthony Liguori
/* 16-bit read entry point for a sub-page region. */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    subpage_t *mmio = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    /* Width index 1 = word access. */
    return subpage_readlen(mmio, addr, 1);
}
3209 db7b5426 blueswir1
3210 c227f099 Anthony Liguori
/* 16-bit write entry point for a sub-page region. */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_t *mmio = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    /* Width index 1 = word access. */
    subpage_writelen(mmio, addr, value, 1);
}
3218 db7b5426 blueswir1
3219 c227f099 Anthony Liguori
/* 32-bit read entry point for a sub-page region. */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    subpage_t *mmio = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    /* Width index 2 = long access. */
    return subpage_readlen(mmio, addr, 2);
}
3227 db7b5426 blueswir1
3228 db7b5426 blueswir1
static void subpage_writel (void *opaque,
3229 c227f099 Anthony Liguori
                         target_phys_addr_t addr, uint32_t value)
3230 db7b5426 blueswir1
{
3231 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3232 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3233 db7b5426 blueswir1
#endif
3234 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
3235 db7b5426 blueswir1
}
3236 db7b5426 blueswir1
3237 d60efc6b Blue Swirl
/* Read dispatch table (byte/word/long) installed for sub-page regions. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3242 db7b5426 blueswir1
3243 d60efc6b Blue Swirl
/* Write dispatch table (byte/word/long) installed for sub-page regions. */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3248 db7b5426 blueswir1
3249 c227f099 Anthony Liguori
/* Point the sub-page slots covering [start, end] (page-relative byte
   offsets) at the io-mem entry 'memory', recording 'region_offset' for
   each slot. Returns 0 on success, -1 if the range exceeds the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Convert the encoded phys_offset into a plain io-mem table index. */
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        /* Copy handlers for each access width; NULL handlers (partial-
           width registrations) leave the slot untouched. */
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
3281 db7b5426 blueswir1
3282 c227f099 Anthony Liguori
/* Create a sub-page container for the page at 'base': register it as a
   fresh io-mem region driven by the subpage_read/subpage_write tables,
   store the encoded index (with IO_MEM_SUBPAGE set) into *phys, and
   pre-populate the whole page with the original region. */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio = qemu_mallocz(sizeof(subpage_t));
    int subpage_memory;

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
3302 db7b5426 blueswir1
3303 88715657 aliguori
static int get_free_io_mem_idx(void)
3304 88715657 aliguori
{
3305 88715657 aliguori
    int i;
3306 88715657 aliguori
3307 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3308 88715657 aliguori
        if (!io_mem_used[i]) {
3309 88715657 aliguori
            io_mem_used[i] = 1;
3310 88715657 aliguori
            return i;
3311 88715657 aliguori
        }
3312 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3313 88715657 aliguori
    return -1;
3314 88715657 aliguori
}
3315 88715657 aliguori
3316 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        /* Allocate a fresh slot. */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller passed an encoded index; decode and bounds-check it. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        /* Any missing width handler marks the region as sub-width. */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    /* Return the index re-encoded for use as a phys_offset value. */
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
3349 61382a50 bellard
3350 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3351 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3352 1eed09cb Avi Kivity
                           void *opaque)
3353 1eed09cb Avi Kivity
{
3354 1eed09cb Avi Kivity
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3355 1eed09cb Avi Kivity
}
3356 1eed09cb Avi Kivity
3357 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3358 88715657 aliguori
{
3359 88715657 aliguori
    int i;
3360 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3361 88715657 aliguori
3362 88715657 aliguori
    for (i=0;i < 3; i++) {
3363 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3364 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3365 88715657 aliguori
    }
3366 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3367 88715657 aliguori
    io_mem_used[io_index] = 0;
3368 88715657 aliguori
}
3369 88715657 aliguori
3370 e9179ce1 Avi Kivity
static void io_mem_init(void)
3371 e9179ce1 Avi Kivity
{
3372 e9179ce1 Avi Kivity
    int i;
3373 e9179ce1 Avi Kivity
3374 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3375 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3376 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3377 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3378 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3379 e9179ce1 Avi Kivity
3380 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3381 e9179ce1 Avi Kivity
                                          watch_mem_write, NULL);
3382 e9179ce1 Avi Kivity
}
3383 e9179ce1 Avi Kivity
3384 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3385 e2eef170 pbrook
3386 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3387 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3388 a68fe89c Paul Brook
/* user-mode variant: copy 'len' bytes between 'buf' and guest virtual
   memory at 'addr' via lock_user/unlock_user, honouring page protection.
   Returns 0 on success, -1 on an invalid or protected page. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        /* Process at most one guest page per iteration. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3426 8df1cd07 bellard
3427 13eb76e0 bellard
#else
3428 c227f099 Anthony Liguori
/* Copy 'len' bytes between 'buf' and guest physical memory at 'addr'.
   RAM pages are accessed with memcpy (invalidating translated code on
   writes); MMIO pages are accessed through the io-mem handlers using
   the widest naturally-aligned access (4/2/1 bytes) available. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Process at most one guest page per iteration. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O case: dispatch through the io-mem write handlers. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3524 8df1cd07 bellard
3525 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Process at most one guest page per iteration. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        /* Silently skip anything that is neither RAM nor ROM(D);
           unlike cpu_physical_memory_rw there is no MMIO dispatch. */
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3563 d0ecd2aa bellard
3564 6d16c2f8 aliguori
/* State of the single global bounce buffer used when a physical region
   cannot be mapped directly into host memory. */
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;
3571 6d16c2f8 aliguori
3572 ba223c29 aliguori
/* A caller waiting for map resources to become available; notified via
   'callback' (and then unregistered) by cpu_notify_map_clients(). */
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3580 ba223c29 aliguori
3581 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3582 ba223c29 aliguori
{
3583 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3584 ba223c29 aliguori
3585 ba223c29 aliguori
    client->opaque = opaque;
3586 ba223c29 aliguori
    client->callback = callback;
3587 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3588 ba223c29 aliguori
    return client;
3589 ba223c29 aliguori
}
3590 ba223c29 aliguori
3591 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3592 ba223c29 aliguori
{
3593 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3594 ba223c29 aliguori
3595 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3596 34d5e948 Isaku Yamahata
    qemu_free(client);
3597 ba223c29 aliguori
}
3598 ba223c29 aliguori
3599 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3600 ba223c29 aliguori
{
3601 ba223c29 aliguori
    MapClient *client;
3602 ba223c29 aliguori
3603 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3604 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3605 ba223c29 aliguori
        client->callback(client->opaque);
3606 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3607 ba223c29 aliguori
    }
3608 ba223c29 aliguori
}
3609 ba223c29 aliguori
3610 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;      /* bytes successfully mapped so far */
    int l;                            /* bytes covered in the current page */
    uint8_t *ret = NULL;              /* start of the host mapping to return */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;                 /* phys_offset + IO_MEM_* flag bits */
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* Clamp 'l' so each iteration handles at most one target page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not plain RAM: must go through the single static bounce
               buffer.  Only usable at the very start of a mapping, and
               only if no other mapping already holds it. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Read mapping: prefill the buffer from guest memory/MMIO. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            /* Direct RAM: translate to a host pointer. */
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host pointers are no longer contiguous with what we have
               mapped so far; stop and return the shorter range. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3671 6d16c2f8 aliguori
3672 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but written pages need
           their dirty state updated and any translated code invalidated. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffered mapping: flush writes back to the guest, release the
       buffer, and wake anyone waiting for it. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;   /* marks the bounce buffer as free again */
    cpu_notify_map_clients();
}
3707 d0ecd2aa bellard
3708 8df1cd07 bellard
/* Load a 32-bit value from guest physical memory.
   warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;    /* phys_offset + IO_MEM_* flag bits for the page */
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    /* Anything above IO_MEM_ROM is a device, unless the ROMD flag says the
       region is ROM that is directly readable. */
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
3739 8df1cd07 bellard
3740 84b7b8e7 bellard
/* Load a 64-bit value from guest physical memory, performed as two 32-bit
   device reads in the I/O case.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;    /* phys_offset + IO_MEM_* flag bits for the page */
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: split into two 32-bit accesses, ordered per target
           endianness. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
3777 84b7b8e7 bellard
3778 aab33094 bellard
/* XXX: optimize */
3779 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
3780 aab33094 bellard
{
3781 aab33094 bellard
    uint8_t val;
3782 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3783 aab33094 bellard
    return val;
3784 aab33094 bellard
}
3785 aab33094 bellard
3786 aab33094 bellard
/* XXX: optimize */
3787 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
3788 aab33094 bellard
{
3789 aab33094 bellard
    uint16_t val;
3790 aab33094 bellard
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3791 aab33094 bellard
    return tswap16(val);
3792 aab33094 bellard
}
3793 aab33094 bellard
3794 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;    /* phys_offset + IO_MEM_* flag bits for the page */
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: dispatch to the registered 32-bit write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration we must still track the write so the page gets
           re-sent, even though normal dirty handling is skipped here. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
3832 8df1cd07 bellard
3833 c227f099 Anthony Liguori
/* Store a 64-bit value without dirty tracking or code invalidation;
   64-bit counterpart of stl_phys_notdirty().  The I/O case is split into
   two 32-bit device writes ordered per target endianness. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;    /* phys_offset + IO_MEM_* flag bits for the page */
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: direct host store, no dirty bookkeeping. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3864 bc98a7ef j_mayer
3865 8df1cd07 bellard
/* Store a 32-bit value to guest physical memory, with full dirty tracking
   and code invalidation on the RAM path.
   warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;    /* phys_offset + IO_MEM_* flag bits for the page */
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: dispatch to the registered 32-bit write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
3900 8df1cd07 bellard
3901 aab33094 bellard
/* XXX: optimize */
3902 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
3903 aab33094 bellard
{
3904 aab33094 bellard
    uint8_t v = val;
3905 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3906 aab33094 bellard
}
3907 aab33094 bellard
3908 aab33094 bellard
/* XXX: optimize */
3909 c227f099 Anthony Liguori
void stw_phys(target_phys_addr_t addr, uint32_t val)
3910 aab33094 bellard
{
3911 aab33094 bellard
    uint16_t v = tswap16(val);
3912 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3913 aab33094 bellard
}
3914 aab33094 bellard
3915 aab33094 bellard
/* XXX: optimize */
3916 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
3917 aab33094 bellard
{
3918 aab33094 bellard
    val = tswap64(val);
3919 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3920 aab33094 bellard
}
3921 aab33094 bellard
3922 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;                        /* bytes handled in the current page */
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        /* Clamp to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            /* write_rom so the debugger can patch ROM regions too */
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3950 a68fe89c Paul Brook
#endif
3951 13eb76e0 bellard
3952 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Locate the TB containing the host return address of the I/O access. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* Roll CPU state back to the start of the faulting instruction. */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO forces the new TB to treat its final insn as I/O. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
4010 2e70f6ef pbrook
4011 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
4012 b3755a91 Paul Brook
4013 e3db7226 bellard
/* Dump translation-buffer and TB statistics through 'cpu_fprintf', which
   is called with 'f' as its stream (monitor- or stdio-backed). */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* Single pass over all translated blocks to gather the statistics. */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;           /* TB spans two guest pages */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;     /* TB has a patched direct jump */
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++; /* ... and a second one */
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n", 
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
4065 e3db7226 bellard
4066 61382a50 bellard
/* Instantiate the softmmu code-access helpers (_cmmu suffix) for the four
   access sizes.  softmmu_template.h expands differently for each SHIFT
   (0=byte, 1=word, 2=long, 3=quad) and consumes the macros below. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL            /* no meaningful return PC for code fetch */
#define env cpu_single_env      /* templates refer to 'env' directly */
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env
4084 61382a50 bellard
4085 61382a50 bellard
#endif